      1 /*	$NetBSD: hme.c,v 1.4 1999/12/17 14:37:15 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * HME Ethernet module driver.
     41  */
     42 
     43 #define HMEDEBUG
     44 
     45 #include "opt_inet.h"
     46 #include "opt_ccitt.h"
     47 #include "opt_llc.h"
     48 #include "opt_ns.h"
     49 #include "bpfilter.h"
     50 #include "rnd.h"
     51 
     52 #include <sys/param.h>
     53 #include <sys/systm.h>
     54 #include <sys/mbuf.h>
     55 #include <sys/syslog.h>
     56 #include <sys/socket.h>
     57 #include <sys/device.h>
     58 #include <sys/malloc.h>
     59 #include <sys/ioctl.h>
     60 #include <sys/errno.h>
     61 #if NRND > 0
     62 #include <sys/rnd.h>
     63 #endif
     64 
     65 #include <net/if.h>
     66 #include <net/if_dl.h>
     67 #include <net/if_ether.h>
     68 #include <net/if_media.h>
     69 
     70 #ifdef INET
     71 #include <netinet/in.h>
     72 #include <netinet/if_inarp.h>
     73 #include <netinet/in_systm.h>
     74 #include <netinet/in_var.h>
     75 #include <netinet/ip.h>
     76 #endif
     77 
     78 #ifdef NS
     79 #include <netns/ns.h>
     80 #include <netns/ns_if.h>
     81 #endif
     82 
     83 #if NBPFILTER > 0
     84 #include <net/bpf.h>
     85 #include <net/bpfdesc.h>
     86 #endif
     87 
     88 #include <dev/mii/mii.h>
     89 #include <dev/mii/miivar.h>
     90 
     91 #include <machine/bus.h>
     92 
     93 #include <dev/ic/hmereg.h>
     94 #include <dev/ic/hmevar.h>
     95 
     96 void		hme_start __P((struct ifnet *));
     97 void		hme_stop __P((struct hme_softc *));
     98 int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
     99 void		hme_watchdog __P((struct ifnet *));
    100 void		hme_shutdown __P((void *));
    101 void		hme_init __P((struct hme_softc *));
    102 void		hme_meminit __P((struct hme_softc *));
    103 void		hme_mifinit __P((struct hme_softc *));
    104 void		hme_reset __P((struct hme_softc *));
    105 void		hme_setladrf __P((struct hme_softc *));
    106 
    107 /* MII methods & callbacks */
    108 static int	hme_mii_readreg __P((struct device *, int, int));
    109 static void	hme_mii_writereg __P((struct device *, int, int, int));
    110 static void	hme_mii_statchg __P((struct device *));
    111 
    112 int		hme_mediachange __P((struct ifnet *));
    113 void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));
    114 
    115 struct mbuf	*hme_get __P((struct hme_softc *, int, int));
    116 int		hme_put __P((struct hme_softc *, int, struct mbuf *));
    117 void		hme_read __P((struct hme_softc *, int, int));
    118 int		hme_eint __P((struct hme_softc *, u_int));
    119 int		hme_rint __P((struct hme_softc *));
    120 int		hme_tint __P((struct hme_softc *));
    121 
    122 static int	ether_cmp __P((u_char *, u_char *));
    123 
    124 /* Default buffer copy routines */
    125 void	hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
    126 void	hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
    127 void	hme_zerobuf_contig __P((struct hme_softc *, int, int));
    128 
    129 
    130 void
    131 hme_config(sc)
    132 	struct hme_softc *sc;
    133 {
    134 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    135 	struct mii_data *mii = &sc->sc_mii;
    136 	bus_dma_segment_t seg;
    137 	bus_size_t size;
    138 	int rseg, error;
    139 
    140 	/*
    141 	 * HME common initialization.
    142 	 *
    143 	 * hme_softc fields that must be initialized by the front-end:
    144 	 *
    145 	 * the bus tag:
    146 	 *	sc_bustag
    147 	 *
    148 	 * the dma bus tag:
    149 	 *	sc_dmatag
    150 	 *
    151 	 * the bus handles:
    152 	 *	sc_seb		(Shared Ethernet Block registers)
    153 	 *	sc_erx		(Receiver Unit registers)
    154 	 *	sc_etx		(Transmitter Unit registers)
    155 	 *	sc_mac		(MAC registers)
     156 	 *	sc_mif		(Management Interface registers)
    157 	 *
    158 	 * the maximum bus burst size:
    159 	 *	sc_burst
    160 	 *
    161 	 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
    162 	 *	rb_membase, rb_dmabase)
    163 	 *
    164 	 * the local Ethernet address:
    165 	 *	sc_enaddr
    166 	 *
    167 	 */
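         	/*
         	 * A minimal sketch (hypothetical, for illustration only) of what a
         	 * bus front-end attach routine provides before calling hme_config();
         	 * the names `sa', `seb_handle' etc. are placeholders, not real code:
         	 *
         	 *	sc->sc_bustag = sa->sa_bustag;
         	 *	sc->sc_dmatag = sa->sa_dmatag;
         	 *	sc->sc_seb = seb_handle;	(from bus_space_map())
         	 *	sc->sc_etx = etx_handle;
         	 *	sc->sc_erx = erx_handle;
         	 *	sc->sc_mac = mac_handle;
         	 *	sc->sc_mif = mif_handle;
         	 *	sc->sc_burst = 64;		(or 16/32, from PROM/config)
         	 *	... fill in sc->sc_enaddr ...
         	 *	hme_config(sc);
         	 */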
    168 
    169 	/* Make sure the chip is stopped. */
    170 	hme_stop(sc);
    171 
    172 
    173 	/*
    174 	 * Allocate descriptors and buffers
     175 	 * XXX - do all this differently, and more configurably,
     176 	 * e.g. use something like `dma_load_mbuf()' on transmit,
     177 	 *     and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
     178 	 *     all the time) on the receiver side.
    179 	 */
    180 #define _HME_NDESC	32
    181 #define _HME_BUFSZ	1536
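         /*
          * _HME_BUFSZ (1536) leaves room for a maximum-size Ethernet frame
          * (1518 bytes including the CRC); presumably rounded up for alignment.
          */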
    182 
    183 	/* Note: the # of descriptors must be a multiple of 16 */
    184 	sc->sc_rb.rb_ntbuf = _HME_NDESC;
    185 	sc->sc_rb.rb_nrbuf = _HME_NDESC;
    186 
    187 	/*
    188 	 * Allocate DMA capable memory
    189 	 * Buffer descriptors must be aligned on a 2048 byte boundary;
    190 	 * take this into account when calculating the size. Note that
    191 	 * the maximum number of descriptors (256) occupies 2048 bytes,
    192 	 * so we allocate that much regardless of _HME_NDESC.
    193 	 */
    194 	size =	2048 +					/* TX descriptors */
    195 		2048 +					/* RX descriptors */
    196 		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
     197 		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */
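         	/*
         	 * With the defaults above (_HME_NDESC = 32, _HME_BUFSZ = 1536) this
         	 * works out to 2048 + 2048 + 32*1536 + 32*1536 = 102400 bytes.
         	 */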
    198 	if ((error = bus_dmamem_alloc(sc->sc_dmatag, size,
    199 				      2048, 0,
    200 				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    201 		printf("%s: DMA buffer alloc error %d\n",
     202 			sc->sc_dev.dv_xname, error);
         		return;
     203 	}
    204 	sc->sc_rb.rb_dmabase = seg.ds_addr;
    205 
     206 	/* Map DMA memory in CPU addressable space */
    207 	if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg, size,
    208 				    &sc->sc_rb.rb_membase,
    209 				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    210 		printf("%s: DMA buffer map error %d\n",
    211 			sc->sc_dev.dv_xname, error);
    212 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
    213 		return;
    214 	}
    215 
    216 #if 0
    217 	/*
    218 	 * Install default copy routines if not supplied.
    219 	 */
    220 	if (sc->sc_copytobuf == NULL)
    221 		sc->sc_copytobuf = hme_copytobuf_contig;
    222 
    223 	if (sc->sc_copyfrombuf == NULL)
    224 		sc->sc_copyfrombuf = hme_copyfrombuf_contig;
    225 #endif
    226 
    227 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
    228 
    229 	/* Initialize ifnet structure. */
    230 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
    231 	ifp->if_softc = sc;
    232 	ifp->if_start = hme_start;
    233 	ifp->if_ioctl = hme_ioctl;
    234 	ifp->if_watchdog = hme_watchdog;
    235 	ifp->if_flags =
    236 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
    237 
    238 	/* Initialize ifmedia structures and MII info */
    239 	mii->mii_ifp = ifp;
    240 	mii->mii_readreg = hme_mii_readreg;
    241 	mii->mii_writereg = hme_mii_writereg;
    242 	mii->mii_statchg = hme_mii_statchg;
    243 
    244 	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);
    245 
    246 	hme_mifinit(sc);
    247 
    248 	mii_phy_probe(&sc->sc_dev, mii, 0xffffffff,
    249 			MII_PHY_ANY, MII_OFFSET_ANY);
    250 
    251 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    252 		/* No PHY attached */
    253 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    254 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
    255 	} else {
    256 		/*
    257 		 * XXX - we can really do the following ONLY if the
    258 		 * phy indeed has the auto negotiation capability!!
    259 		 */
    260 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
    261 	}
    262 
    263 	/* Attach the interface. */
    264 	if_attach(ifp);
    265 	ether_ifattach(ifp, sc->sc_enaddr);
    266 
    267 #if NBPFILTER > 0
    268 	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
    269 #endif
    270 
    271 	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
    272 	if (sc->sc_sh == NULL)
    273 		panic("hme_config: can't establish shutdownhook");
    274 
    275 #if 0
    276 	printf("%s: %d receive buffers, %d transmit buffers\n",
    277 	    sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
    278 	sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
    279 					M_WAITOK);
    280 	sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
    281 					M_WAITOK);
    282 #endif
    283 
    284 #if NRND > 0
    285 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
    286 			  RND_TYPE_NET, 0);
    287 #endif
    288 }
    289 
    290 void
    291 hme_reset(sc)
    292 	struct hme_softc *sc;
    293 {
    294 	int s;
    295 
    296 	s = splnet();
    297 	hme_init(sc);
    298 	splx(s);
    299 }
    300 
    301 void
    302 hme_stop(sc)
    303 	struct hme_softc *sc;
    304 {
    305 	bus_space_tag_t t = sc->sc_bustag;
    306 	bus_space_handle_t seb = sc->sc_seb;
    307 	int n;
    308 
    309 	/* Reset transmitter and receiver */
    310 	bus_space_write_4(t, seb, HME_SEBI_RESET,
    311 			  (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
    312 
    313 	for (n = 0; n < 20; n++) {
    314 		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
    315 		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
    316 			return;
    317 		DELAY(20);
    318 	}
    319 
    320 	printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
    321 }
    322 
    323 void
    324 hme_meminit(sc)
    325 	struct hme_softc *sc;
    326 {
    327 	bus_addr_t txbufdma, rxbufdma;
    328 	bus_addr_t dma;
    329 	caddr_t p;
    330 	unsigned int ntbuf, nrbuf, i;
    331 	struct hme_ring *hr = &sc->sc_rb;
    332 
    333 	p = hr->rb_membase;
    334 	dma = hr->rb_dmabase;
    335 
    336 	ntbuf = hr->rb_ntbuf;
    337 	nrbuf = hr->rb_nrbuf;
    338 
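         	/*
         	 * Carve up the single DMA area allocated in hme_config().  The
         	 * resulting layout, in order, is:
         	 *
         	 *	TX descriptors | pad to 2048 | RX descriptors | pad to 2048 |
         	 *	TX buffers | RX buffers
         	 */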
    339 	/*
    340 	 * Allocate transmit descriptors
    341 	 */
    342 	hr->rb_txd = p;
    343 	hr->rb_txddma = dma;
    344 	p += ntbuf * HME_XD_SIZE;
    345 	dma += ntbuf * HME_XD_SIZE;
     346 	/* We have reserved descriptor space until the next 2048 byte boundary. */
    347 	dma = (bus_addr_t)roundup((u_long)dma, 2048);
    348 	p = (caddr_t)roundup((u_long)p, 2048);
    349 
    350 	/*
    351 	 * Allocate receive descriptors
    352 	 */
    353 	hr->rb_rxd = p;
    354 	hr->rb_rxddma = dma;
    355 	p += nrbuf * HME_XD_SIZE;
    356 	dma += nrbuf * HME_XD_SIZE;
     357 	/* Again move forward to the next 2048 byte boundary. */
    358 	dma = (bus_addr_t)roundup((u_long)dma, 2048);
    359 	p = (caddr_t)roundup((u_long)p, 2048);
    360 
    361 
    362 	/*
    363 	 * Allocate transmit buffers
    364 	 */
    365 	hr->rb_txbuf = p;
    366 	txbufdma = dma;
    367 	p += ntbuf * _HME_BUFSZ;
    368 	dma += ntbuf * _HME_BUFSZ;
    369 
    370 	/*
    371 	 * Allocate receive buffers
    372 	 */
    373 	hr->rb_rxbuf = p;
    374 	rxbufdma = dma;
    375 	p += nrbuf * _HME_BUFSZ;
    376 	dma += nrbuf * _HME_BUFSZ;
    377 
    378 	/*
    379 	 * Initialize transmit buffer descriptors
    380 	 */
    381 	for (i = 0; i < ntbuf; i++) {
    382 		HME_XD_SETADDR(hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
    383 		HME_XD_SETFLAGS(hr->rb_txd, i, 0);
    384 	}
    385 
    386 	/*
    387 	 * Initialize receive buffer descriptors
    388 	 */
    389 	for (i = 0; i < nrbuf; i++) {
    390 		HME_XD_SETADDR(hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
    391 		HME_XD_SETFLAGS(hr->rb_rxd, i,
    392 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    393 	}
    394 
    395 	hr->rb_tdhead = hr->rb_tdtail = 0;
    396 	hr->rb_td_nbusy = 0;
    397 	hr->rb_rdtail = 0;
    398 }
    399 
    400 /*
    401  * Initialization of interface; set up initialization block
    402  * and transmit/receive descriptor rings.
    403  */
    404 void
    405 hme_init(sc)
    406 	struct hme_softc *sc;
    407 {
    408 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    409 	bus_space_tag_t t = sc->sc_bustag;
    410 	bus_space_handle_t seb = sc->sc_seb;
    411 	bus_space_handle_t etx = sc->sc_etx;
    412 	bus_space_handle_t erx = sc->sc_erx;
    413 	bus_space_handle_t mac = sc->sc_mac;
    414 	bus_space_handle_t mif = sc->sc_mif;
    415 	u_int8_t *ea;
    416 	u_int32_t v;
    417 
    418 	/*
    419 	 * Initialization sequence. The numbered steps below correspond
    420 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
    421 	 * Channel Engine manual (part of the PCIO manual).
    422 	 * See also the STP2002-STQ document from Sun Microsystems.
    423 	 */
    424 
    425 	/* step 1 & 2. Reset the Ethernet Channel */
    426 	hme_stop(sc);
    427 
    428 	/* Re-initialize the MIF */
    429 	hme_mifinit(sc);
    430 
    431 	/* Call MI reset function if any */
    432 	if (sc->sc_hwreset)
    433 		(*sc->sc_hwreset)(sc);
    434 
    435 #if 0
    436 	/* Mask all MIF interrupts, just in case */
    437 	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
    438 #endif
    439 
    440 	/* step 3. Setup data structures in host memory */
    441 	hme_meminit(sc);
    442 
    443 	/* step 4. TX MAC registers & counters */
    444 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    445 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    446 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    447 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    448 
    449 	/* Load station MAC address */
    450 	ea = sc->sc_enaddr;
    451 	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
    452 	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
    453 	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
    454 
    455 	/*
    456 	 * Init seed for backoff
    457 	 * (source suggested by manual: low 10 bits of MAC address)
    458 	 */
    459 	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
    460 	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
    461 
    462 
     463 	/* Note: Accepting power-on defaults for the other MAC registers here. */
    464 
    465 
    466 	/* step 5. RX MAC registers & counters */
    467 	hme_setladrf(sc);
    468 
    469 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
    470 	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
    471 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
    472 
    473 	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
    474 
    475 
    476 	/* step 8. Global Configuration & Interrupt Mask */
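         	/*
         	 * The complement of the interesting status bits is written to the
         	 * mask register, so only the events listed below stay unmasked.
         	 */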
    477 	bus_space_write_4(t, seb, HME_SEBI_IMASK,
    478 			~(
    479 			  /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
    480 			  HME_SEB_STAT_HOSTTOTX |
    481 			  HME_SEB_STAT_RXTOHOST |
    482 			  HME_SEB_STAT_TXALL |
    483 			  HME_SEB_STAT_TXPERR |
    484 			  HME_SEB_STAT_RCNTEXP |
    485 			  HME_SEB_STAT_ALL_ERRORS ));
    486 
    487 	switch (sc->sc_burst) {
    488 	default:
    489 		v = 0;
    490 		break;
    491 	case 16:
    492 		v = HME_SEB_CFG_BURST16;
    493 		break;
    494 	case 32:
    495 		v = HME_SEB_CFG_BURST32;
    496 		break;
    497 	case 64:
    498 		v = HME_SEB_CFG_BURST64;
    499 		break;
    500 	}
    501 	bus_space_write_4(t, seb, HME_SEBI_CFG, v);
    502 
    503 	/* step 9. ETX Configuration: use mostly default values */
    504 
    505 	/* Enable DMA */
    506 	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
    507 	v |= HME_ETX_CFG_DMAENABLE;
    508 	bus_space_write_4(t, etx, HME_ETXI_CFG, v);
    509 
    510 	/* Transmit Descriptor ring size: in increments of 16 */
    511 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);
    512 
    513 
    514 	/* step 10. ERX Configuration */
    515 	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
    516 
    517 	/* Encode Receive Descriptor ring size: four possible values */
    518 	switch (_HME_NDESC /*XXX*/) {
    519 	case 32:
    520 		v |= HME_ERX_CFG_RINGSIZE32;
    521 		break;
    522 	case 64:
    523 		v |= HME_ERX_CFG_RINGSIZE64;
    524 		break;
    525 	case 128:
    526 		v |= HME_ERX_CFG_RINGSIZE128;
    527 		break;
    528 	case 256:
    529 		v |= HME_ERX_CFG_RINGSIZE256;
    530 		break;
    531 	default:
    532 		printf("hme: invalid Receive Descriptor ring size\n");
    533 		break;
    534 	}
    535 
    536 	/* Enable DMA */
    537 	v |= HME_ERX_CFG_DMAENABLE;
    538 	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
    539 
    540 	/* step 11. XIF Configuration */
    541 	v = bus_space_read_4(t, mac, HME_MACI_XIF);
    542 	v |= HME_MAC_XIF_OE;
    543 	/* If an external transceiver is connected, enable its MII drivers */
    544 	if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
    545 		v |= HME_MAC_XIF_MIIENABLE;
    546 	bus_space_write_4(t, mac, HME_MACI_XIF, v);
    547 
    548 
    549 	/* step 12. RX_MAC Configuration Register */
    550 	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
    551 	v |= HME_MAC_RXCFG_ENABLE;
    552 	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
    553 
    554 	/* step 13. TX_MAC Configuration Register */
    555 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
    556 	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
    557 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
    558 
     559 	/* step 14. Issue Transmit Pending command (done per packet in hme_start()) */
    560 
    561 	/* Call MI initialization function if any */
    562 	if (sc->sc_hwinit)
    563 		(*sc->sc_hwinit)(sc);
    564 
    565 	ifp->if_flags |= IFF_RUNNING;
    566 	ifp->if_flags &= ~IFF_OACTIVE;
    567 	ifp->if_timer = 0;
    568 	hme_start(ifp);
    569 }
    570 
    571 /*
    572  * Compare two Ether/802 addresses for equality, inlined and unrolled for
     573  * speed.  Returns 1 when the addresses are equal, 0 when they differ.
    574  */
    575 static __inline__ int
    576 ether_cmp(a, b)
    577 	u_char *a, *b;
    578 {
    579 
    580 	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
    581 	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
    582 		return (0);
    583 	return (1);
    584 }
    585 
    586 
    587 /*
    588  * Routine to copy from mbuf chain to transmit buffer in
    589  * network buffer memory.
    590  * Returns the amount of data copied.
    591  */
    592 int
    593 hme_put(sc, ri, m)
    594 	struct hme_softc *sc;
    595 	int ri;			/* Ring index */
    596 	struct mbuf *m;
    597 {
    598 	struct mbuf *n;
    599 	int len, tlen = 0;
    600 	caddr_t bp;
    601 
    602 	bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
    603 	for (; m; m = n) {
    604 		len = m->m_len;
    605 		if (len == 0) {
    606 			MFREE(m, n);
    607 			continue;
    608 		}
    609 		bcopy(mtod(m, caddr_t), bp, len);
    610 		bp += len;
    611 		tlen += len;
    612 		MFREE(m, n);
    613 	}
    614 	return (tlen);
    615 }
    616 
    617 /*
    618  * Pull data off an interface.
    619  * Len is length of data, with local net header stripped.
    620  * We copy the data into mbufs.  When full cluster sized units are present
    621  * we copy into clusters.
    622  */
    623 struct mbuf *
    624 hme_get(sc, ri, totlen)
    625 	struct hme_softc *sc;
    626 	int ri, totlen;
    627 {
    628 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    629 	struct mbuf *m, *m0, *newm;
    630 	caddr_t bp;
    631 	int len;
    632 
    633 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
    634 	if (m0 == 0)
    635 		return (0);
    636 	m0->m_pkthdr.rcvif = ifp;
    637 	m0->m_pkthdr.len = totlen;
    638 	len = MHLEN;
    639 	m = m0;
    640 
    641 	bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
    642 
    643 	while (totlen > 0) {
    644 		if (totlen >= MINCLSIZE) {
    645 			MCLGET(m, M_DONTWAIT);
    646 			if ((m->m_flags & M_EXT) == 0)
    647 				goto bad;
    648 			len = MCLBYTES;
    649 		}
    650 
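         		/*
         		 * For the first mbuf, shift m_data so that the payload
         		 * following the 14-byte Ethernet header ends up ALIGN()ed
         		 * for the protocol headers parsed later.
         		 */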
    651 		if (m == m0) {
    652 			caddr_t newdata = (caddr_t)
    653 			    ALIGN(m->m_data + sizeof(struct ether_header)) -
    654 			    sizeof(struct ether_header);
    655 			len -= newdata - m->m_data;
    656 			m->m_data = newdata;
    657 		}
    658 
    659 		m->m_len = len = min(totlen, len);
    660 		bcopy(bp, mtod(m, caddr_t), len);
    661 		bp += len;
    662 
    663 		totlen -= len;
    664 		if (totlen > 0) {
    665 			MGET(newm, M_DONTWAIT, MT_DATA);
    666 			if (newm == 0)
    667 				goto bad;
    668 			len = MLEN;
    669 			m = m->m_next = newm;
    670 		}
    671 	}
    672 
    673 	return (m0);
    674 
    675 bad:
    676 	m_freem(m0);
    677 	return (0);
    678 }
    679 
    680 /*
    681  * Pass a packet to the higher levels.
    682  */
    683 void
    684 hme_read(sc, ix, len)
    685 	struct hme_softc *sc;
    686 	int ix, len;
    687 {
    688 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    689 	struct mbuf *m;
    690 
    691 	if (len <= sizeof(struct ether_header) ||
    692 	    len > ETHERMTU + sizeof(struct ether_header)) {
    693 #ifdef HMEDEBUG
    694 		printf("%s: invalid packet size %d; dropping\n",
    695 		    sc->sc_dev.dv_xname, len);
    696 #endif
    697 		ifp->if_ierrors++;
    698 		return;
    699 	}
    700 
    701 	/* Pull packet off interface. */
    702 	m = hme_get(sc, ix, len);
    703 	if (m == 0) {
    704 		ifp->if_ierrors++;
    705 		return;
    706 	}
    707 
    708 	ifp->if_ipackets++;
    709 
    710 #if NBPFILTER > 0
    711 	/*
    712 	 * Check if there's a BPF listener on this interface.
    713 	 * If so, hand off the raw packet to BPF.
    714 	 */
    715 	if (ifp->if_bpf) {
    716 		struct ether_header *eh;
    717 
    718 		bpf_mtap(ifp->if_bpf, m);
    719 
    720 		/*
    721 		 * Note that the interface cannot be in promiscuous mode if
    722 		 * there are no BPF listeners.  And if we are in promiscuous
    723 		 * mode, we have to check if this packet is really ours.
    724 		 */
    725 
    726 		/* We assume that the header fit entirely in one mbuf. */
    727 		eh = mtod(m, struct ether_header *);
    728 
    729 		if ((ifp->if_flags & IFF_PROMISC) != 0 &&
    730 		    (eh->ether_dhost[0] & 1) == 0 && /* !mcast and !bcast */
    731 		    ether_cmp(eh->ether_dhost, sc->sc_enaddr) == 0) {
    732 			m_freem(m);
    733 			return;
    734 		}
    735 	}
    736 #endif
    737 
    738 	/* Pass the packet up. */
    739 	(*ifp->if_input)(ifp, m);
    740 }
    741 
    742 void
    743 hme_start(ifp)
    744 	struct ifnet *ifp;
    745 {
    746 	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
    747 	caddr_t txd = sc->sc_rb.rb_txd;
    748 	struct mbuf *m;
    749 	unsigned int ri, len;
    750 	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
    751 
    752 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
    753 		return;
    754 
    755 	ri = sc->sc_rb.rb_tdhead;
    756 
    757 	for (;;) {
    758 		IF_DEQUEUE(&ifp->if_snd, m);
    759 		if (m == 0)
    760 			break;
    761 
    762 #if NBPFILTER > 0
    763 		/*
    764 		 * If BPF is listening on this interface, let it see the
    765 		 * packet before we commit it to the wire.
    766 		 */
    767 		if (ifp->if_bpf)
    768 			bpf_mtap(ifp->if_bpf, m);
    769 #endif
    770 
    771 		/*
    772 		 * Copy the mbuf chain into the transmit buffer.
    773 		 */
    774 		len = hme_put(sc, ri, m);
    775 
    776 		/*
     777 		 * Set up the transmit descriptor (OWN|SOP|EOP) and kick the transmitter
    778 		 */
    779 		HME_XD_SETFLAGS(txd, ri,
    780 			HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
    781 			HME_XD_ENCODE_TSIZE(len));
    782 
    783 		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
    784 		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
    785 				  HME_ETX_TP_DMAWAKEUP);
    786 
    787 		if (++ri == ntbuf)
    788 			ri = 0;
    789 
    790 		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
    791 			ifp->if_flags |= IFF_OACTIVE;
    792 			break;
    793 		}
    794 	}
    795 
    796 	sc->sc_rb.rb_tdhead = ri;
    797 }
    798 
    799 /*
    800  * Transmit interrupt.
    801  */
    802 int
    803 hme_tint(sc)
    804 	struct hme_softc *sc;
    805 {
    806 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    807 	bus_space_tag_t t = sc->sc_bustag;
    808 	bus_space_handle_t mac = sc->sc_mac;
    809 	unsigned int ri, txflags;
    810 
    811 	/*
    812 	 * Unload collision counters
    813 	 */
    814 	ifp->if_collisions +=
    815 		bus_space_read_4(t, mac, HME_MACI_NCCNT) +
    816 		bus_space_read_4(t, mac, HME_MACI_FCCNT) +
    817 		bus_space_read_4(t, mac, HME_MACI_EXCNT) +
    818 		bus_space_read_4(t, mac, HME_MACI_LTCNT);
    819 
    820 	/*
    821 	 * then clear the hardware counters.
    822 	 */
    823 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    824 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    825 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    826 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    827 
    828 	/* Fetch current position in the transmit ring */
    829 	ri = sc->sc_rb.rb_tdtail;
    830 
    831 	for (;;) {
    832 		if (sc->sc_rb.rb_td_nbusy <= 0)
    833 			break;
    834 
    835 		txflags = HME_XD_GETFLAGS(sc->sc_rb.rb_txd, ri);
    836 
    837 		if (txflags & HME_XD_OWN)
    838 			break;
    839 
    840 		ifp->if_flags &= ~IFF_OACTIVE;
    841 		ifp->if_opackets++;
    842 
    843 		if (++ri == sc->sc_rb.rb_ntbuf)
    844 			ri = 0;
    845 
    846 		--sc->sc_rb.rb_td_nbusy;
    847 	}
    848 
    849 	/* Update ring */
    850 	sc->sc_rb.rb_tdtail = ri;
    851 
    852 	hme_start(ifp);
    853 
    854 	if (sc->sc_rb.rb_td_nbusy == 0)
    855 		ifp->if_timer = 0;
    856 
    857 	return (1);
    858 }
    859 
    860 /*
    861  * Receive interrupt.
    862  */
    863 int
    864 hme_rint(sc)
    865 	struct hme_softc *sc;
    866 {
    867 	caddr_t xdr = sc->sc_rb.rb_rxd;
    868 	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
    869 	unsigned int ri, len;
    870 	u_int32_t flags;
    871 
    872 	ri = sc->sc_rb.rb_rdtail;
    873 
    874 	/*
    875 	 * Process all buffers with valid data.
    876 	 */
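         	/*
         	 * A descriptor whose OWN bit is still set belongs to the chip;
         	 * stop there.  Each descriptor we consume is handed back below
         	 * with OWN set again and the full buffer size re-encoded.
         	 */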
    877 	for (;;) {
    878 		flags = HME_XD_GETFLAGS(xdr, ri);
    879 		if (flags & HME_XD_OWN)
    880 			break;
    881 
    882 		if (flags & HME_XD_OFL) {
    883 			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
    884 					sc->sc_dev.dv_xname, ri, flags);
    885 		} else {
    886 			len = HME_XD_DECODE_RSIZE(flags);
    887 			hme_read(sc, ri, len);
    888 		}
    889 
    890 		/* This buffer can be used by the hardware again */
    891 		HME_XD_SETFLAGS(xdr, ri,
    892 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    893 
    894 		if (++ri == nrbuf)
    895 			ri = 0;
    896 	}
    897 
    898 	sc->sc_rb.rb_rdtail = ri;
    899 
    900 	return (1);
    901 }
    902 
    903 int
    904 hme_eint(sc, status)
    905 	struct hme_softc *sc;
    906 	u_int status;
    907 {
    908 	char bits[128];
    909 
    910 	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
    911 		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
    912 		return (1);
    913 	}
    914 
    915 	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
    916 		bitmask_snprintf(status, HME_SEB_STAT_BITS, bits,sizeof(bits)));
    917 	return (1);
    918 }
    919 
    920 int
    921 hme_intr(v)
    922 	void *v;
    923 {
    924 	struct hme_softc *sc = (struct hme_softc *)v;
    925 	bus_space_tag_t t = sc->sc_bustag;
    926 	bus_space_handle_t seb = sc->sc_seb;
    927 	u_int32_t status;
    928 	int r = 0;
    929 
    930 	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
    931 
    932 	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
    933 		r |= hme_eint(sc, status);
    934 
    935 	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
    936 		r |= hme_tint(sc);
    937 
    938 	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
    939 		r |= hme_rint(sc);
    940 
    941 	return (r);
    942 }
    943 
    944 
    945 void
    946 hme_watchdog(ifp)
    947 	struct ifnet *ifp;
    948 {
    949 	struct hme_softc *sc = ifp->if_softc;
    950 
    951 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
    952 	++ifp->if_oerrors;
    953 
    954 	hme_reset(sc);
    955 }
    956 
    957 /*
    958  * Initialize the MII Management Interface
    959  */
    960 void
    961 hme_mifinit(sc)
    962 	struct hme_softc *sc;
    963 {
    964 	bus_space_tag_t t = sc->sc_bustag;
    965 	bus_space_handle_t mif = sc->sc_mif;
    966 	u_int32_t v;
    967 
    968 	/* Configure the MIF in frame mode */
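         	/* (i.e. not bit-bang mode; the MIF serializes MII frames itself) */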
    969 	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
    970 	v &= ~HME_MIF_CFG_BBMODE;
    971 	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
    972 }
    973 
    974 /*
    975  * MII interface
    976  */
    977 static int
    978 hme_mii_readreg(self, phy, reg)
    979 	struct device *self;
    980 	int phy, reg;
    981 {
    982 	struct hme_softc *sc = (void *)self;
    983 	bus_space_tag_t t = sc->sc_bustag;
    984 	bus_space_handle_t mif = sc->sc_mif;
    985 	int n;
    986 	u_int32_t v;
    987 
    988 	/* Construct the frame command */
    989 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
    990 	    HME_MIF_FO_TAMSB |
    991 	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
    992 	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
    993 	    (reg << HME_MIF_FO_REGAD_SHIFT);
    994 
    995 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
    996 	for (n = 0; n < 100; n++) {
    997 		DELAY(1);
    998 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
    999 		if (v & HME_MIF_FO_TALSB)
   1000 			return (v & HME_MIF_FO_DATA);
   1001 	}
   1002 
   1003 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
   1004 	return (0);
   1005 }
   1006 
   1007 static void
   1008 hme_mii_writereg(self, phy, reg, val)
   1009 	struct device *self;
   1010 	int phy, reg, val;
   1011 {
   1012 	struct hme_softc *sc = (void *)self;
   1013 	bus_space_tag_t t = sc->sc_bustag;
   1014 	bus_space_handle_t mif = sc->sc_mif;
   1015 	int n;
   1016 	u_int32_t v;
   1017 
   1018 	/* Construct the frame command */
   1019 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
   1020 	    HME_MIF_FO_TAMSB				|
   1021 	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
   1022 	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
   1023 	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
   1024 	    (val & HME_MIF_FO_DATA);
   1025 
   1026 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
   1027 	for (n = 0; n < 100; n++) {
   1028 		DELAY(1);
   1029 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
   1030 		if (v & HME_MIF_FO_TALSB)
   1031 			return;
   1032 	}
   1033 
   1034 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
   1035 }
   1036 
   1037 static void
   1038 hme_mii_statchg(dev)
   1039 	struct device *dev;
   1040 {
   1041 #ifdef HMEDEBUG
   1042 	struct hme_softc *sc = (void *)dev;
   1043 	if (sc->sc_debug)
   1044 		printf("hme_mii_statchg: status change\n");
   1045 #endif
   1046 }
   1047 
   1048 int
   1049 hme_mediachange(ifp)
   1050 	struct ifnet *ifp;
   1051 {
   1052 	struct hme_softc *sc = ifp->if_softc;
   1053 	struct ifmedia *ifm = &sc->sc_media;
   1054 	int newmedia = ifm->ifm_media;
   1055 	bus_space_tag_t t = sc->sc_bustag;
   1056 	bus_space_handle_t mac = sc->sc_mac;
   1057 	u_int32_t v;
   1058 	int error;
   1059 
   1060 	if (IFM_TYPE(newmedia) != IFM_ETHER)
   1061 		return (EINVAL);
   1062 
   1063 	if ((ifp->if_flags & IFF_UP) == 0)
   1064 		return (0);
   1065 
   1066 	if ((error = mii_mediachg(&sc->sc_mii)) != 0)
   1067 		return (error);
   1068 
   1069 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
   1070 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
   1071 		v |= HME_MAC_TXCFG_FULLDPLX;
   1072 	else
   1073 		v &= ~HME_MAC_TXCFG_FULLDPLX;
   1074 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
   1075 
   1076 	return (0);
   1077 }
   1078 
   1079 void
   1080 hme_mediastatus(ifp, ifmr)
   1081 	struct ifnet *ifp;
   1082 	struct ifmediareq *ifmr;
   1083 {
   1084 	struct hme_softc *sc = ifp->if_softc;
   1085 
   1086 	if ((ifp->if_flags & IFF_UP) == 0)
   1087 		return;
   1088 
   1089 	mii_pollstat(&sc->sc_mii);
   1090 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   1091 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   1092 }
   1093 
   1094 /*
   1095  * Process an ioctl request.
   1096  */
   1097 int
   1098 hme_ioctl(ifp, cmd, data)
   1099 	struct ifnet *ifp;
   1100 	u_long cmd;
   1101 	caddr_t data;
   1102 {
   1103 	struct hme_softc *sc = ifp->if_softc;
   1104 	struct ifaddr *ifa = (struct ifaddr *)data;
   1105 	struct ifreq *ifr = (struct ifreq *)data;
   1106 	int s, error = 0;
   1107 
   1108 	s = splnet();
   1109 
   1110 	switch (cmd) {
   1111 
   1112 	case SIOCSIFADDR:
   1113 		ifp->if_flags |= IFF_UP;
   1114 
   1115 		switch (ifa->ifa_addr->sa_family) {
   1116 #ifdef INET
   1117 		case AF_INET:
   1118 			hme_init(sc);
   1119 			arp_ifinit(ifp, ifa);
   1120 			break;
   1121 #endif
   1122 #ifdef NS
   1123 		case AF_NS:
   1124 		    {
   1125 			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
   1126 
   1127 			if (ns_nullhost(*ina))
   1128 				ina->x_host =
   1129 				    *(union ns_host *)LLADDR(ifp->if_sadl);
   1130 			else {
   1131 				bcopy(ina->x_host.c_host,
   1132 				    LLADDR(ifp->if_sadl),
   1133 				    sizeof(sc->sc_enaddr));
   1134 			}
   1135 			/* Set new address. */
   1136 			hme_init(sc);
   1137 			break;
   1138 		    }
   1139 #endif
   1140 		default:
   1141 			hme_init(sc);
   1142 			break;
   1143 		}
   1144 		break;
   1145 
   1146 	case SIOCSIFFLAGS:
   1147 		if ((ifp->if_flags & IFF_UP) == 0 &&
   1148 		    (ifp->if_flags & IFF_RUNNING) != 0) {
   1149 			/*
   1150 			 * If interface is marked down and it is running, then
   1151 			 * stop it.
   1152 			 */
   1153 			hme_stop(sc);
   1154 			ifp->if_flags &= ~IFF_RUNNING;
   1155 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
   1156 		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
   1157 			/*
   1158 			 * If interface is marked up and it is stopped, then
   1159 			 * start it.
   1160 			 */
   1161 			hme_init(sc);
   1162 		} else if ((ifp->if_flags & IFF_UP) != 0) {
   1163 			/*
   1164 			 * Reset the interface to pick up changes in any other
   1165 			 * flags that affect hardware registers.
   1166 			 */
   1167 			/*hme_stop(sc);*/
   1168 			hme_init(sc);
   1169 		}
   1170 #ifdef HMEDEBUG
   1171 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
   1172 #endif
   1173 		break;
   1174 
   1175 	case SIOCADDMULTI:
   1176 	case SIOCDELMULTI:
   1177 		error = (cmd == SIOCADDMULTI) ?
   1178 		    ether_addmulti(ifr, &sc->sc_ethercom) :
   1179 		    ether_delmulti(ifr, &sc->sc_ethercom);
   1180 
   1181 		if (error == ENETRESET) {
   1182 			/*
   1183 			 * Multicast list has changed; set the hardware filter
   1184 			 * accordingly.
   1185 			 */
   1186 			hme_setladrf(sc);
   1187 			error = 0;
   1188 		}
   1189 		break;
   1190 
   1191 	case SIOCGIFMEDIA:
   1192 	case SIOCSIFMEDIA:
   1193 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
   1194 		break;
   1195 
   1196 	default:
   1197 		error = EINVAL;
   1198 		break;
   1199 	}
   1200 
   1201 	splx(s);
   1202 	return (error);
   1203 }
   1204 
   1205 void
   1206 hme_shutdown(arg)
   1207 	void *arg;
   1208 {
   1209 
   1210 	hme_stop((struct hme_softc *)arg);
   1211 }
   1212 
   1213 /*
   1214  * Set up the logical address filter.
   1215  */
   1216 void
   1217 hme_setladrf(sc)
   1218 	struct hme_softc *sc;
   1219 {
   1220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1221 	struct ether_multi *enm;
   1222 	struct ether_multistep step;
   1223 	struct ethercom *ec = &sc->sc_ethercom;
   1224 	bus_space_tag_t t = sc->sc_bustag;
   1225 	bus_space_handle_t mac = sc->sc_mac;
   1226 	u_char *cp;
   1227 	u_int32_t crc;
   1228 	u_int32_t hash[4];
   1229 	int len;
   1230 
   1231 	/*
   1232 	 * Set up multicast address filter by passing all multicast addresses
   1233 	 * through a crc generator, and then using the high order 6 bits as an
   1234 	 * index into the 64 bit logical address filter.  The high order bit
   1235 	 * selects the word, while the rest of the bits select the bit within
   1236 	 * the word.
   1237 	 */
   1238 
   1239 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
   1240 		u_int32_t v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
   1241 		v |= HME_MAC_RXCFG_PMISC;
   1242 		bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
   1243 		goto allmulti;
   1244 	}
   1245 
   1246 	/* Clear hash table */
   1247 	hash[3] = hash[2] = hash[1] = hash[0] = 0;
   1248 	ETHER_FIRST_MULTI(step, ec, enm);
   1249 	while (enm != NULL) {
    1250 		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi) == 0) {
   1251 			/*
   1252 			 * We must listen to a range of multicast addresses.
   1253 			 * For now, just accept all multicasts, rather than
   1254 			 * trying to set only those filter bits needed to match
   1255 			 * the range.  (At this time, the only use of address
   1256 			 * ranges is for IP multicast routing, for which the
   1257 			 * range is big enough to require all bits set.)
   1258 			 */
   1259 			goto allmulti;
   1260 		}
   1261 
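         		/*
         		 * The loop below runs the Ethernet CRC-32 (reflected
         		 * polynomial 0xedb88320, initial value 0xffffffff) over the
         		 * address, LSB first; the top 6 CRC bits then select one of
         		 * the 64 filter bits.
         		 */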
   1262 		cp = enm->enm_addrlo;
   1263 		crc = 0xffffffff;
   1264 		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
   1265 			int octet = *cp++;
   1266 			int i;
   1267 
   1268 #define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
   1269 			for (i = 0; i < 8; i++) {
   1270 				if ((crc & 1) ^ (octet & 1)) {
   1271 					crc >>= 1;
   1272 					crc ^= MC_POLY_LE;
   1273 				} else {
   1274 					crc >>= 1;
   1275 				}
   1276 				octet >>= 1;
   1277 			}
   1278 		}
   1279 		/* Just want the 6 most significant bits. */
   1280 		crc >>= 26;
   1281 
   1282 		/* Set the corresponding bit in the filter. */
   1283 		hash[crc >> 4] |= 1 << (crc & 0xf);
   1284 
   1285 		ETHER_NEXT_MULTI(step, enm);
   1286 	}
   1287 
   1288 	/* Now load the hash table onto the chip */
   1289 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
   1290 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
   1291 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
   1292 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
   1293 
   1294 	ifp->if_flags &= ~IFF_ALLMULTI;
   1295 	return;
   1296 
   1297 allmulti:
   1298 	ifp->if_flags |= IFF_ALLMULTI;
   1299 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, 0xffff);
   1300 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, 0xffff);
   1301 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, 0xffff);
   1302 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, 0xffff);
   1303 }
   1304 
   1305 /*
   1306  * Routines for accessing the transmit and receive buffers.
   1307  * The various CPU and adapter configurations supported by this
   1308  * driver require three different access methods for buffers
   1309  * and descriptors:
   1310  *	(1) contig (contiguous data; no padding),
   1311  *	(2) gap2 (two bytes of data followed by two bytes of padding),
   1312  *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
   1313  */
   1314 
   1315 #if 0
   1316 /*
   1317  * contig: contiguous data with no padding.
   1318  *
   1319  * Buffers may have any alignment.
   1320  */
   1321 
   1322 void
   1323 hme_copytobuf_contig(sc, from, ri, len)
   1324 	struct hme_softc *sc;
   1325 	void *from;
   1326 	int ri, len;
   1327 {
   1328 	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);
   1329 
   1330 	/*
   1331 	 * Just call bcopy() to do the work.
   1332 	 */
   1333 	bcopy(from, buf, len);
   1334 }
   1335 
   1336 void
    1337 hme_copyfrombuf_contig(sc, to, ri, len)
    1338 	struct hme_softc *sc;
    1339 	void *to;
    1340 	int ri, len;
    1341 {
    1342 	volatile caddr_t buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);
   1343 
   1344 	/*
   1345 	 * Just call bcopy() to do the work.
   1346 	 */
   1347 	bcopy(buf, to, len);
   1348 }
   1349 #endif
   1350