/* $NetBSD: i82596.c,v 1.2 2004/08/26 16:56:07 jkunz Exp $ */

/*
 * Copyright (c) 2003 Jochen Kunz.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of Jochen Kunz may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOCHEN KUNZ
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL JOCHEN KUNZ
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Intel i82596 10MBit/s Ethernet chip.
 * It operates the i82596 in 32-Bit Linear Mode, as opposed to the old i82586
 * ie(4) driver (src/sys/dev/ic/i82586.c), which degrades the i82596 to
 * i82586 compatibility mode.
 * Documentation about this chip can be found at http://www.openpa.net/,
 * file names 29021806.pdf and 29021906.pdf.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i82596.c,v 1.2 2004/08/26 16:56:07 jkunz Exp $");

/* autoconfig and device stuff */
#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>
#include "locators.h"
#include "ioconf.h"

/* bus_space / bus_dma etc. */
#include <machine/bus.h>
#include <machine/intr.h>

/* general system data and functions */
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/ioccom.h>
#include <sys/types.h>

/* tsleep / sleep / wakeup */
#include <sys/proc.h>
/* hz for above */
#include <sys/kernel.h>

/* network stuff */
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <sys/socket.h>
#include <sys/mbuf.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/ic/i82596reg.h>
#include <dev/ic/i82596var.h>



/* Supported chip variants */
char *i82596_typenames[] = { "unknown", "DX/SX", "CA" };



/* media change and status callback */
static int iee_mediachange(struct ifnet *);
static void iee_mediastatus(struct ifnet *, struct ifmediareq *);

/* interface routines to upper protocols */
static void iee_start(struct ifnet *);			/* initiate output */
static int iee_ioctl(struct ifnet *, u_long, caddr_t);	/* ioctl routine */
static int iee_init(struct ifnet *);			/* init routine */
static void iee_stop(struct ifnet *, int);		/* stop routine */
static void iee_watchdog(struct ifnet *);		/* timer routine */
static void iee_drain(struct ifnet *);			/* release resources */

/* internal helper functions */
static void iee_cb_setup(struct iee_softc *, u_int32_t);

/*
Things a MD frontend has to provide:

The functions via function pointers in the softc:
        int (*sc_iee_cmd)(struct iee_softc *sc, u_int32_t cmd);
        int (*sc_iee_reset)(struct iee_softc *sc);
        void (*sc_mediastatus)(struct ifnet *, struct ifmediareq *);
        int (*sc_mediachange)(struct ifnet *);

sc_iee_cmd(): send a command to the i82596 by writing the cmd parameter
	to the SCP cmd word and issuing a Channel Attention.
sc_iee_reset(): initiate a reset, supply the address of the SCP to the
	chip, wait for the chip to initialize and ACK interrupts that
	this may have caused by calling (sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
These functions must carefully bus_dmamap_sync() all data they have touched!

sc_mediastatus() and sc_mediachange() are just MD hooks to the corresponding
MI functions. The MD frontend may set these pointers to NULL when they
are not needed.

sc->sc_type has to be set to I82596_UNKNOWN or I82596_DX or I82596_CA.
This is for printing out the correct chip type at attach time only. The
MI backend doesn't distinguish different chip types when programming
the chip.

sc->sc_flags has to be set to 0 on little endian hardware and to
IEE_NEED_SWAP on big endian hardware, when endianness conversion is not
done by the bus attachment. Usually you need to set IEE_NEED_SWAP
when IEE_SYSBUS_BE is set in the sysbus byte.

sc->sc_cl_align must be set to 1 or to the cache line size. When set to
1 no special alignment of DMA descriptors is done. If sc->sc_cl_align != 1
it forces alignment of the data structures in the shared memory to a multiple
of sc->sc_cl_align. This is needed on archs like hp700 that have non DMA
I/O coherent caches and are unable to map the shared memory uncacheable.
(At least pre PA7100LC CPUs are unable to map memory uncacheable.)

sc->sc_cl_align MUST BE INITIALIZED BEFORE THE FOLLOWING MACROS ARE USED:
SC_* IEE_*_SZ IEE_*_OFF IEE_SHMEM_MAX (shell style glob(3) pattern)

The MD frontend has to allocate a piece of DMA memory at least of
IEE_SHMEM_MAX bytes size. All communication with the chip is done via
this shared memory. If possible map this memory non-cacheable on
archs with non DMA I/O coherent caches. The base of the memory needs
to be aligned to an even address if sc->sc_cl_align == 1 and aligned
to a cache line if sc->sc_cl_align != 1.

An interrupt with iee_intr() as handler must be established.

Call void iee_attach(struct iee_softc *sc, u_int8_t *ether_address,
int *media, int nmedia, int defmedia); when everything is set up. First
parameter is a pointer to the MI softc, ether_address is an array that
contains the ethernet address. media is an array of the media types
provided by the hardware. The members of this array are supplied to
ifmedia_add() in sequence. nmedia is the count of elements in media.
defmedia is the default media that is set via ifmedia_set().
nmedia and defmedia are ignored when media == NULL.

The MD frontend may call iee_detach() to detach the device.

See sys/arch/hp700/gsc/if_iee.c for an example. A hypothetical sketch of
the attach glue follows below.
*/
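
/*
A minimal, hypothetical sketch of such MD attach glue. None of the names
iee_foo_attach(), iee_foo_cmd(), iee_foo_reset() nor the values assigned to
sc_flags and sc_cl_align come from a real frontend; they only illustrate the
calling sequence described above. See sys/arch/hp700/gsc/if_iee.c for real,
working code.

	static void
	iee_foo_attach(struct device *parent, struct device *self, void *aux)
	{
		struct iee_softc *sc = (struct iee_softc *)self;
		int media[1] = { IFM_ETHER | IFM_10_T };
		u_int8_t eth_addr[ETHER_ADDR_LEN];

		sc->sc_iee_cmd = iee_foo_cmd;
		sc->sc_iee_reset = iee_foo_reset;
		sc->sc_mediastatus = NULL;
		sc->sc_mediachange = NULL;
		sc->sc_type = I82596_CA;
		sc->sc_flags = IEE_NEED_SWAP;	(hypothetical: big endian bus)
		sc->sc_cl_align = 32;		(hypothetical: 32 byte lines)
		(... read the MAC address into eth_addr, allocate and map at
		 least IEE_SHMEM_MAX bytes of DMA memory into sc_shmem_map,
		 establish the interrupt with iee_intr() as handler ...)
		iee_attach(sc, eth_addr, media, 1, IFM_ETHER | IFM_10_T);
	}
*/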


/*
How frame reception is done:
Each Receive Frame Descriptor has one associated Receive Buffer Descriptor.
Each RBD points to the data area of a mbuf cluster. The RFDs are linked
together in a circular list. sc->sc_rx_done is the count of RFDs in the
list already processed / the number of the RFD that has to be checked for
a new frame first at the next RX interrupt. Upon successful reception of
a frame the mbuf cluster is handed to upper protocol layers, a new mbuf
cluster is allocated and the RFD / RBD are reinitialized accordingly.

When an RFD list overrun occurs the whole RFD and RBD lists are reinitialized
and frame reception is started again.
*/
int
iee_intr(void *intarg)
{
	struct iee_softc *sc = intarg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct iee_rfd *rfd;
	struct iee_rbd *rbd;
	bus_dmamap_t rx_map;
	struct mbuf *rx_mbuf;
	struct mbuf *new_mbuf;
	int scb_status;
	int scb_cmd;
	int n;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
		return(1);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_POSTREAD);
	scb_status = SC_SCB->scb_status;
	scb_cmd = SC_SCB->scb_cmd;
	rfd = SC_RFD(sc->sc_rx_done);
	while ((rfd->rfd_status & IEE_RFD_C) != 0) {
		/* At least one packet was received. */
		rbd = SC_RBD(sc->sc_rx_done);
		rx_map = sc->sc_rx_map[sc->sc_rx_done];
		rx_mbuf = sc->sc_rx_mbuf[sc->sc_rx_done];
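		/*
		 * Clear the End Of List bit on the previous RBD; the
		 * current RBD becomes the new end of the list when it
		 * is recycled below.
		 */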
		SC_RBD((sc->sc_rx_done + IEE_NRFD - 1) % IEE_NRFD)->rbd_size
		    &= ~IEE_RBD_EL;
		if ((rfd->rfd_status & IEE_RFD_OK) == 0
		    || (rbd->rbd_count & IEE_RBD_EOF) == 0
		    || (rbd->rbd_count & IEE_RBD_F) == 0) {
			/* Receive error, skip frame and reuse buffer. */
			rfd->rfd_status = 0;
			rbd->rbd_count = 0;
			rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
			printf("%s: iee_intr: receive error %d, rfd_status="
			    "0x%.4x, rfd_count=0x%.4x\n", sc->sc_dev.dv_xname,
			    ++sc->sc_rx_err, rfd->rfd_status, rbd->rbd_count);
			sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
			continue;
		}
		rfd->rfd_status = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0, rx_mbuf->m_ext.ext_size,
		    BUS_DMASYNC_POSTREAD);
		rx_mbuf->m_pkthdr.len = rx_mbuf->m_len =
		    rbd->rbd_count & IEE_RBD_COUNT;
		rx_mbuf->m_pkthdr.rcvif = ifp;
		MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
		if (new_mbuf == NULL) {
			printf("%s: iee_intr: can't allocate mbuf\n",
			    sc->sc_dev.dv_xname);
			break;
		}
		MCLAIM(new_mbuf, &sc->sc_ethercom.ec_rx_mowner);
		MCLGET(new_mbuf, M_DONTWAIT);
		if ((new_mbuf->m_flags & M_EXT) == 0) {
			printf("%s: iee_intr: can't alloc mbuf cluster\n",
			    sc->sc_dev.dv_xname);
			m_freem(new_mbuf);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, rx_map);
		if (bus_dmamap_load(sc->sc_dmat, rx_map,
		    new_mbuf->m_ext.ext_buf, new_mbuf->m_ext.ext_size,
		    NULL, BUS_DMA_READ | BUS_DMA_NOWAIT) != 0)
			panic("%s: iee_intr: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
		bus_dmamap_sync(sc->sc_dmat, rx_map, 0,
		    new_mbuf->m_ext.ext_size, BUS_DMASYNC_PREREAD);
#if NBPFILTER > 0
		if (ifp->if_bpf != 0)
			bpf_mtap(ifp->if_bpf, rx_mbuf);
#endif /* NBPFILTER > 0 */
		(*ifp->if_input)(ifp, rx_mbuf);
		ifp->if_ipackets++;
		sc->sc_rx_mbuf[sc->sc_rx_done] = new_mbuf;
		rbd->rbd_count = 0;
		rbd->rbd_size = IEE_RBD_EL | rx_map->dm_segs[0].ds_len;
		rbd->rbd_rb_addr = rx_map->dm_segs[0].ds_addr;
		sc->sc_rx_done = (sc->sc_rx_done + 1) % IEE_NRFD;
		rfd = SC_RFD(sc->sc_rx_done);
	}
	if ((scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR1
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR2
	    || (scb_status & IEE_SCB_RUS) == IEE_SCB_RUS_NR3) {
		/* Receive Overrun, reinit receive ring buffer. */
		for (n = 0 ; n < IEE_NRFD ; n++) {
			SC_RFD(n)->rfd_cmd = IEE_RFD_SF;
			SC_RFD(n)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
			    + IEE_RFD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
			    + IEE_RBD_SZ * ((n + 1) % IEE_NRFD));
			SC_RBD(n)->rbd_size = IEE_RBD_EL |
			    sc->sc_rx_map[n]->dm_segs[0].ds_len;
			SC_RBD(n)->rbd_rb_addr =
			    sc->sc_rx_map[n]->dm_segs[0].ds_addr;
		}
		SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
		sc->sc_rx_done = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_RFD_OFF,
		    IEE_RFD_LIST_SZ + IEE_RBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
		(sc->sc_iee_cmd)(sc, IEE_SCB_RUC_ST);
		printf("%s: iee_intr: receive ring buffer overrun\n",
		    sc->sc_dev.dv_xname);
	}

	if (sc->sc_next_cb != 0
	    && (SC_CB(sc->sc_next_cb - 1)->cb_status & IEE_CB_C) != 0) {
		/* CMD list finished */
		ifp->if_timer = 0;
		if (sc->sc_next_tbd != 0) {
			/* A TX CMD list finished, cleanup */
			for (n = 0 ; n < sc->sc_next_cb ; n++) {
				m_freem(sc->sc_tx_mbuf[n]);
				sc->sc_tx_mbuf[n] = NULL;
				bus_dmamap_unload(sc->sc_dmat, sc->sc_tx_map[n]);
				if ((SC_CB(n)->cb_status & IEE_CB_COL) != 0 &&
				    (SC_CB(n)->cb_status & IEE_CB_MAXCOL) == 0)
					sc->sc_tx_col += 16;
				else
					sc->sc_tx_col += SC_CB(n)->cb_status
					    & IEE_CB_MAXCOL;
			}
			sc->sc_next_tbd = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
		}
		for (n = 0 ; n < sc->sc_next_cb ; n++) {
			/* Check if a CMD failed, but ignore TX errors. */
			if ((SC_CB(n)->cb_cmd & IEE_CB_CMD) != IEE_CB_CMD_TR
			    && ((SC_CB(n)->cb_status & IEE_CB_OK) == 0))
				printf("%s: iee_intr: scb_status=0x%x "
				    "scb_cmd=0x%x failed command %d: "
				    "cb_status[%d]=0x%.4x cb_cmd[%d]=0x%.4x\n",
				    sc->sc_dev.dv_xname, scb_status, scb_cmd,
				    ++sc->sc_cmd_err, n, SC_CB(n)->cb_status,
				    n, SC_CB(n)->cb_cmd);
		}
		sc->sc_next_cb = 0;
		if ((sc->sc_flags & IEE_WANT_MCAST) != 0) {
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
		} else
			/* Try to get deferred packets going. */
			iee_start(ifp);
	}
	if (IEE_SWAP(SC_SCB->scb_crc_err) != sc->sc_crc_err) {
		sc->sc_crc_err = IEE_SWAP(SC_SCB->scb_crc_err);
		printf("%s: iee_intr: crc_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_crc_err);
	}
	if (IEE_SWAP(SC_SCB->scb_align_err) != sc->sc_align_err) {
		sc->sc_align_err = IEE_SWAP(SC_SCB->scb_align_err);
		printf("%s: iee_intr: align_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_align_err);
	}
	if (IEE_SWAP(SC_SCB->scb_resource_err) != sc->sc_resource_err) {
		sc->sc_resource_err = IEE_SWAP(SC_SCB->scb_resource_err);
		printf("%s: iee_intr: resource_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_resource_err);
	}
	if (IEE_SWAP(SC_SCB->scb_overrun_err) != sc->sc_overrun_err) {
		sc->sc_overrun_err = IEE_SWAP(SC_SCB->scb_overrun_err);
		printf("%s: iee_intr: overrun_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_overrun_err);
	}
	if (IEE_SWAP(SC_SCB->scb_rcvcdt_err) != sc->sc_rcvcdt_err) {
		sc->sc_rcvcdt_err = IEE_SWAP(SC_SCB->scb_rcvcdt_err);
		printf("%s: iee_intr: rcvcdt_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_rcvcdt_err);
	}
	if (IEE_SWAP(SC_SCB->scb_short_fr_err) != sc->sc_short_fr_err) {
		sc->sc_short_fr_err = IEE_SWAP(SC_SCB->scb_short_fr_err);
		printf("%s: iee_intr: short_fr_err=%d\n", sc->sc_dev.dv_xname,
		    sc->sc_short_fr_err);
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);
	return(1);
}



/*
How Command Block List Processing is done.

A running CBL is never manipulated. If there is a CBL already running,
further CMDs are deferred until the current list is done. A new list is
set up when the old one has finished.
This eases programming. To manipulate a running CBL it is necessary to
suspend the Command Unit to avoid race conditions. After a suspend
is sent we have to wait for an interrupt that ACKs the suspend. Then
we can manipulate the CBL and resume operation. I am not sure that this
is more effective than the current, much simpler approach. => KISS
See i82596CA data sheet page 26.

A CBL is running or on the way to be set up when (sc->sc_next_cb != 0).

A CBL may consist of TX CMDs, and _only_ TX CMDs.
A TX CBL is running or on the way to be set up when
((sc->sc_next_cb != 0) && (sc->sc_next_tbd != 0)).

A CBL may consist of other non-TX CMDs like IAS or CONF, and _only_
non-TX CMDs.

This comes mostly from the way an Ethernet driver works and
because running CBLs are not manipulated when they are on the way. If
if_start() is called there will be TX CMDs enqueued so we have a running
CBL and other CMDs from e.g. if_ioctl() will be deferred and vice versa.

The Multicast Setup Command is special. A MCS needs more space than
a single CB has. The actual space requirement depends on the length of the
multicast list. So we always defer MCS until other CBLs are finished,
then we set up a CONF CMD in the first CB. The CONF CMD is needed to
turn ALLMULTI on the hardware on or off. The MCS is the 2nd CB and may
use all the remaining space in the CBL and the Transmit Buffer Descriptor
List. (Therefore CBL and TBDL must be contiguous in physical and virtual
memory. This is guaranteed through the definitions of the list offsets
in i82596reg.h and because it is only a single DMA segment used for all
lists.) When ALLMULTI is enabled via the CONF CMD, the MCS is run with
a multicast list length of 0, thus disabling the multicast filter.
A deferred MCS is signaled via ((sc->sc_flags & IEE_WANT_MCAST) != 0)
*/
void
iee_cb_setup(struct iee_softc *sc, u_int32_t cmd)
{
	struct iee_cb *cb = SC_CB(sc->sc_next_cb);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multistep step;
	struct ether_multi *enm;

	memset(cb, 0, IEE_CB_SZ);
	cb->cb_cmd = cmd;
	switch (cmd & IEE_CB_CMD) {
	case IEE_CB_CMD_NOP:	/* NOP CMD */
		break;
	case IEE_CB_CMD_IAS:	/* Individual Address Setup */
		memcpy((void*)cb->cb_ind_addr, LLADDR(ifp->if_sadl),
		    ETHER_ADDR_LEN);
		break;
	case IEE_CB_CMD_CONF:	/* Configure */
		memcpy((void*)cb->cb_cf, sc->sc_cf, sc->sc_cf[0]
		    & IEE_CF_0_CNT_M);
		break;
	case IEE_CB_CMD_MCS:	/* Multicast Setup */
		if (sc->sc_next_cb != 0) {
			sc->sc_flags |= IEE_WANT_MCAST;
			return;
		}
		sc->sc_flags &= ~IEE_WANT_MCAST;
		if ((sc->sc_cf[8] & IEE_CF_8_PRM) != 0) {
			/* Need no multicast filter in promisc mode. */
			iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			return;
		}
		/* Leave room for a CONF CMD to enable/disable ALLMULTI mode */
		cb = SC_CB(sc->sc_next_cb + 1);
		cb->cb_cmd = cmd;
		cb->cb_mcast.mc_size = 0;
		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0 || cb->cb_mcast.mc_size
			    * ETHER_ADDR_LEN + 2 * IEE_CB_SZ
			    > IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ) {
				cb->cb_mcast.mc_size = 0;
				break;
			}
			memcpy((void*) &cb->cb_mcast.mc_addrs[
			    cb->cb_mcast.mc_size * ETHER_ADDR_LEN],
			    enm->enm_addrlo, ETHER_ADDR_LEN);
			ETHER_NEXT_MULTI(step, enm);
			cb->cb_mcast.mc_size++;
		}
		if (cb->cb_mcast.mc_size == 0) {
			/* Can't do exact mcast filtering, do ALLMULTI mode. */
			ifp->if_flags |= IFF_ALLMULTI;
			sc->sc_cf[11] &= ~IEE_CF_11_MCALL;
		} else {
			/* disable ALLMULTI and load mcast list */
			ifp->if_flags &= ~IFF_ALLMULTI;
			sc->sc_cf[11] |= IEE_CF_11_MCALL;
			/* Mcast setup may need more than IEE_CB_SZ bytes. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map,
			    IEE_CB_OFF, IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ,
			    BUS_DMASYNC_PREWRITE);
		}
		iee_cb_setup(sc, IEE_CB_CMD_CONF);
		break;
	case IEE_CB_CMD_TR:	/* Transmit */
		cb->cb_transmit.tx_tbd_addr = IEE_PHYS_SHMEM(IEE_TBD_OFF
		    + IEE_TBD_SZ * sc->sc_next_tbd);
		cb->cb_cmd |= IEE_CB_SF; /* Always use Flexible Mode. */
		break;
	case IEE_CB_CMD_TDR:	/* Time Domain Reflectometry */
		break;
	case IEE_CB_CMD_DUMP:	/* Dump */
		break;
	case IEE_CB_CMD_DIAG:	/* Diagnose */
		break;
	default:
		/* can't happen */
		break;
	}
	cb->cb_link_addr = IEE_PHYS_SHMEM(IEE_CB_OFF + IEE_CB_SZ *
	    (sc->sc_next_cb + 1));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_OFF
	    + IEE_CB_SZ * sc->sc_next_cb, IEE_CB_SZ, BUS_DMASYNC_PREWRITE);
	sc->sc_next_cb++;
	ifp->if_timer = 5;
	return;
}



void
iee_attach(struct iee_softc *sc, u_int8_t *eth_addr, int *media, int nmedia,
    int defmedia)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int n;

	/* Set pointer to Intermediate System Configuration Pointer. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_SCP->scp_iscp_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_ISCP_OFF));
	/* Set pointer to System Control Block. */
	/* Phys. addr. in big endian order. (Big endian as defined by Intel.) */
	SC_ISCP->iscp_scb_addr = IEE_SWAP(IEE_PHYS_SHMEM(IEE_SCB_OFF));
	/* Set pointer to Receive Frame Area. (physical address) */
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	/* Set pointer to Command Block. (physical address) */
	SC_SCB->scb_cmd_blk_addr = IEE_PHYS_SHMEM(IEE_CB_OFF);

	ifmedia_init(&sc->sc_ifmedia, 0, iee_mediachange, iee_mediastatus);
	if (media != NULL) {
		for (n = 0 ; n < nmedia ; n++)
			ifmedia_add(&sc->sc_ifmedia, media[n], 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, defmedia);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_NONE);
	}

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = iee_start;	/* initiate output routine */
	ifp->if_ioctl = iee_ioctl;	/* ioctl routine */
	ifp->if_init = iee_init;	/* init routine */
	ifp->if_stop = iee_stop;	/* stop routine */
	ifp->if_watchdog = iee_watchdog;	/* timer routine */
	ifp->if_drain = iee_drain;	/* routine to release resources */
	IFQ_SET_READY(&ifp->if_snd);
	/* iee supports IEEE 802.1Q Virtual LANs, see vlan(4). */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, eth_addr);

	aprint_normal(": Intel 82596%s address %s\n",
	    i82596_typenames[sc->sc_type], ether_sprintf(eth_addr));

	for (n = 0 ; n < IEE_NCB ; n++)
		sc->sc_tx_map[n] = NULL;
	for (n = 0 ; n < IEE_NRFD ; n++) {
		sc->sc_rx_mbuf[n] = NULL;
		sc->sc_rx_map[n] = NULL;
	}
	sc->sc_tx_timeout = 0;
	sc->sc_setup_timeout = 0;
	(sc->sc_iee_reset)(sc);
	return;
}



void
iee_detach(struct iee_softc *sc, int flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) != 0)
		iee_stop(ifp, 1);
	ether_ifdetach(ifp);
	if_detach(ifp);
	return;
}



/* media change and status callback */
int
iee_mediachange(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediachange != NULL)
		return ((sc->sc_mediachange)(ifp));
	return(0);
}



void
iee_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmreq)
{
	struct iee_softc *sc = ifp->if_softc;

	if (sc->sc_mediastatus != NULL)
		return ((sc->sc_mediastatus)(ifp, ifmreq));
	return;
}



/* initiate output routine */
void
iee_start(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	struct mbuf *m = NULL;
	int t;
	int n;

	if (sc->sc_next_cb != 0)
		/* There is already a CMD running. Defer packet enqueueing. */
		return;
	for (t = 0 ; t < IEE_NCB ; t++) {
		IFQ_DEQUEUE(&ifp->if_snd, sc->sc_tx_mbuf[t]);
		if (sc->sc_tx_mbuf[t] == NULL)
			break;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
		    sc->sc_tx_mbuf[t], BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
			/*
			 * The packet needs more TBDs than we support.
			 * Copy the packet into a mbuf cluster to get it out.
			 */
			printf("%s: iee_start: failed to load DMA map\n",
			    sc->sc_dev.dv_xname);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: iee_start: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: iee_start: can't allocate mbuf "
				    "cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				m_freem(m);
				t--;
				continue;
			}
			m_copydata(sc->sc_tx_mbuf[t], 0,
			    sc->sc_tx_mbuf[t]->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m->m_len = sc->sc_tx_mbuf[t]->m_pkthdr.len;
			m_freem(sc->sc_tx_mbuf[t]);
			sc->sc_tx_mbuf[t] = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_tx_map[t],
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0) {
				printf("%s: iee_start: can't load TX DMA map\n",
				    sc->sc_dev.dv_xname);
				m_freem(sc->sc_tx_mbuf[t]);
				t--;
				continue;
			}
		}
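		/* Chain one TBD per DMA segment of this packet. */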
		for (n = 0 ; n < sc->sc_tx_map[t]->dm_nsegs ; n++) {
			SC_TBD(sc->sc_next_tbd + n)->tbd_tb_addr =
			    sc->sc_tx_map[t]->dm_segs[n].ds_addr;
			SC_TBD(sc->sc_next_tbd + n)->tbd_size =
			    sc->sc_tx_map[t]->dm_segs[n].ds_len;
			SC_TBD(sc->sc_next_tbd + n)->tbd_link_addr =
			    IEE_PHYS_SHMEM(IEE_TBD_OFF + IEE_TBD_SZ
			    * (sc->sc_next_tbd + n + 1));
		}
		SC_TBD(sc->sc_next_tbd + n - 1)->tbd_size |= IEE_CB_EL;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_tx_map[t], 0,
		    sc->sc_tx_map[t]->dm_mapsize, BUS_DMASYNC_PREWRITE);
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			iee_cb_setup(sc, IEE_CB_CMD_TR | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
		else
			iee_cb_setup(sc, IEE_CB_CMD_TR);
		sc->sc_next_tbd += n;
#if NBPFILTER > 0
		/* Pass packet to bpf if someone listens. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, sc->sc_tx_mbuf[t]);
#endif
	}
	if (t == 0)
		/* No packets got set up for TX. */
		return;
	if (t == IEE_NCB)
		ifp->if_flags |= IFF_OACTIVE;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, IEE_CB_SZ,
	    IEE_CB_LIST_SZ + IEE_TBD_LIST_SZ, BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
	return;
}



/* ioctl routine */
int
iee_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct iee_softc *sc = ifp->if_softc;
	int s;
	int err;

	s = splnet();
	if (cmd == SIOCSIFMEDIA || cmd == SIOCGIFMEDIA)
		err = ifmedia_ioctl(ifp, (struct ifreq *) data,
		    &sc->sc_ifmedia, cmd);
	else {
		err = ether_ioctl(ifp, cmd, data);
		if (err == ENETRESET ||
		    ((ifp->if_flags & IFF_PROMISC) != 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) == 0)
		    || ((ifp->if_flags & IFF_PROMISC) == 0
		    && (sc->sc_cf[8] & IEE_CF_8_PRM) != 0)) {
			/* Do multicast setup / toggle promisc mode. */
			if ((ifp->if_flags & IFF_PROMISC) != 0)
				sc->sc_cf[8] |= IEE_CF_8_PRM;
			else
				sc->sc_cf[8] &= ~IEE_CF_8_PRM;
			/* Put new multicast list into the hardware filter. */
			iee_cb_setup(sc, IEE_CB_CMD_MCS | IEE_CB_S | IEE_CB_EL
			    | IEE_CB_I);
			if ((sc->sc_flags & IEE_WANT_MCAST) == 0)
				/* Mcast setup is not deferred. */
				(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE);
			err = 0;
		}
	}
	splx(s);
	return(err);
}



/* init routine */
int
iee_init(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;
	int r;
	int t;
	int n;
	int err;

	sc->sc_next_cb = 0;
	sc->sc_next_tbd = 0;
	sc->sc_flags &= ~IEE_WANT_MCAST;
	sc->sc_rx_done = 0;
	SC_SCB->scb_crc_err = 0;
	SC_SCB->scb_align_err = 0;
	SC_SCB->scb_resource_err = 0;
	SC_SCB->scb_overrun_err = 0;
	SC_SCB->scb_rcvcdt_err = 0;
	SC_SCB->scb_short_fr_err = 0;
	sc->sc_crc_err = 0;
	sc->sc_align_err = 0;
	sc->sc_resource_err = 0;
	sc->sc_overrun_err = 0;
	sc->sc_rcvcdt_err = 0;
	sc->sc_short_fr_err = 0;
	sc->sc_tx_col = 0;
	sc->sc_rx_err = 0;
	sc->sc_cmd_err = 0;
	/* Create Transmit DMA maps. */
	for (t = 0 ; t < IEE_NCB ; t++) {
		if (sc->sc_tx_map[t] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, IEE_NTBD, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_map[t]) != 0) {
			printf("%s: iee_init: can't create TX DMA map\n",
			    sc->sc_dev.dv_xname);
			for (n = 0 ; n < t ; n++)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_tx_map[n]);
			return(ENOBUFS);
		}
	}
	/* Initialize Receive Frame and Receive Buffer Descriptors */
	err = 0;
	memset(SC_RFD(0), 0, IEE_RFD_LIST_SZ);
	memset(SC_RBD(0), 0, IEE_RBD_LIST_SZ);
	for (r = 0 ; r < IEE_NRFD ; r++) {
		SC_RFD(r)->rfd_cmd = IEE_RFD_SF;
		SC_RFD(r)->rfd_link_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF
		    + IEE_RFD_SZ * ((r + 1) % IEE_NRFD));

		SC_RBD(r)->rbd_next_rbd = IEE_PHYS_SHMEM(IEE_RBD_OFF
		    + IEE_RBD_SZ * ((r + 1) % IEE_NRFD));
		if (sc->sc_rx_mbuf[r] == NULL) {
			MGETHDR(sc->sc_rx_mbuf[r], M_DONTWAIT, MT_DATA);
			if (sc->sc_rx_mbuf[r] == NULL) {
				printf("%s: iee_init: can't allocate mbuf\n",
				    sc->sc_dev.dv_xname);
				err = 1;
				break;
			}
			MCLAIM(sc->sc_rx_mbuf[r], &sc->sc_ethercom.ec_rx_mowner);
			MCLGET(sc->sc_rx_mbuf[r], M_DONTWAIT);
			if ((sc->sc_rx_mbuf[r]->m_flags & M_EXT) == 0) {
				printf("%s: iee_init: can't allocate mbuf"
				    " cluster\n", sc->sc_dev.dv_xname);
				m_freem(sc->sc_rx_mbuf[r]);
				err = 1;
				break;
			}
		}
		if (sc->sc_rx_map[r] == NULL && bus_dmamap_create(sc->sc_dmat,
		    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_map[r]) != 0) {
			printf("%s: iee_init: can't create RX "
			    "DMA map\n", sc->sc_dev.dv_xname);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		if (bus_dmamap_load(sc->sc_dmat, sc->sc_rx_map[r],
		    sc->sc_rx_mbuf[r]->m_ext.ext_buf,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
			printf("%s: iee_init: can't load RX DMA map\n",
			    sc->sc_dev.dv_xname);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[r]);
			m_freem(sc->sc_rx_mbuf[r]);
			err = 1;
			break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_map[r], 0,
		    sc->sc_rx_mbuf[r]->m_ext.ext_size, BUS_DMASYNC_PREREAD);
		SC_RBD(r)->rbd_size = sc->sc_rx_map[r]->dm_segs[0].ds_len;
		SC_RBD(r)->rbd_rb_addr = sc->sc_rx_map[r]->dm_segs[0].ds_addr;
	}
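	/* Point the first RFD at the head of the RBD list. */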
	SC_RFD(0)->rfd_rbd_addr = IEE_PHYS_SHMEM(IEE_RBD_OFF);
	if (err != 0) {
		for (n = 0 ; n < r ; n++) {
			m_freem(sc->sc_rx_mbuf[n]);
			sc->sc_rx_mbuf[n] = NULL;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
			sc->sc_rx_map[n] = NULL;
		}
		for (n = 0 ; n < t ; n++) {
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
			sc->sc_tx_map[n] = NULL;
		}
		return(ENOBUFS);
	}

	(sc->sc_iee_reset)(sc);
	iee_cb_setup(sc, IEE_CB_CMD_IAS);
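	/*
	 * Load the configuration bytes (chip defaults plus the driver
	 * specific bits, see the IEE_CF_* definitions) and queue a
	 * CONF command.
	 */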
	sc->sc_cf[0] = IEE_CF_0_DEF | IEE_CF_0_PREF;
	sc->sc_cf[1] = IEE_CF_1_DEF;
	sc->sc_cf[2] = IEE_CF_2_DEF;
	sc->sc_cf[3] = IEE_CF_3_ADDRLEN_DEF | IEE_CF_3_NSAI
	    | IEE_CF_3_PREAMLEN_DEF;
	sc->sc_cf[4] = IEE_CF_4_DEF;
	sc->sc_cf[5] = IEE_CF_5_DEF;
	sc->sc_cf[6] = IEE_CF_6_DEF;
	sc->sc_cf[7] = IEE_CF_7_DEF;
	sc->sc_cf[8] = IEE_CF_8_DEF;
	sc->sc_cf[9] = IEE_CF_9_DEF;
	sc->sc_cf[10] = IEE_CF_10_DEF;
	sc->sc_cf[11] = IEE_CF_11_DEF & ~IEE_CF_11_LNGFLD;
	sc->sc_cf[12] = IEE_CF_12_DEF;
	sc->sc_cf[13] = IEE_CF_13_DEF;
	iee_cb_setup(sc, IEE_CB_CMD_CONF | IEE_CB_S | IEE_CB_EL);
	SC_SCB->scb_rfa_addr = IEE_PHYS_SHMEM(IEE_RFD_OFF);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_shmem_map, 0, IEE_SHMEM_MAX,
	    BUS_DMASYNC_PREWRITE);
	(sc->sc_iee_cmd)(sc, IEE_SCB_CUC_EXE | IEE_SCB_RUC_ST);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(sc, IEE_SCB_ACK);

	/* Mark the interface as running and ready to RX/TX packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	return(0);
}



/* stop routine */
void
iee_stop(struct ifnet *ifp, int disable)
{
	struct iee_softc *sc = ifp->if_softc;
	int n;

	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_flags |= IFF_OACTIVE;
	ifp->if_timer = 0;
	/* Reset the chip to get it quiet. */
	(sc->sc_iee_reset)(ifp->if_softc);
	/* Issue a Channel Attention to ACK interrupts we may have caused. */
	(sc->sc_iee_cmd)(ifp->if_softc, IEE_SCB_ACK);
	/* Release any dynamically allocated resources. */
	for (n = 0 ; n < IEE_NCB ; n++) {
		if (sc->sc_tx_map[n] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_map[n]);
		sc->sc_tx_map[n] = NULL;
	}
	for (n = 0 ; n < IEE_NRFD ; n++) {
		if (sc->sc_rx_mbuf[n] != NULL)
			m_freem(sc->sc_rx_mbuf[n]);
		sc->sc_rx_mbuf[n] = NULL;
		if (sc->sc_rx_map[n] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_map[n]);
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_map[n]);
		}
		sc->sc_rx_map[n] = NULL;
	}
	return;
}



/* timer routine */
void
iee_watchdog(struct ifnet *ifp)
{
	struct iee_softc *sc = ifp->if_softc;

	(sc->sc_iee_reset)(sc);
	if (sc->sc_next_tbd != 0)
		printf("%s: iee_watchdog: transmit timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_tx_timeout);
	else
		printf("%s: iee_watchdog: setup timeout %d\n",
		    sc->sc_dev.dv_xname, ++sc->sc_setup_timeout);
	iee_init(ifp);
	return;
}



/* routine to release resources */
void
iee_drain(struct ifnet *ifp)
{
	iee_stop(ifp, 0);
	return;
}