hme.c revision 1.2
      1 /*	$NetBSD: hme.c,v 1.2 1999/12/14 23:58:15 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * HME Ethernet module driver.
     41  */
     42 
     43 #define HMEDEBUG
     44 
     45 #include "opt_inet.h"
     46 #include "opt_ccitt.h"
     47 #include "opt_llc.h"
     48 #include "opt_ns.h"
     49 #include "bpfilter.h"
     50 #include "rnd.h"
     51 
     52 #include <sys/param.h>
     53 #include <sys/systm.h>
     54 #include <sys/mbuf.h>
     55 #include <sys/syslog.h>
     56 #include <sys/socket.h>
     57 #include <sys/device.h>
     58 #include <sys/malloc.h>
     59 #include <sys/ioctl.h>
     60 #include <sys/errno.h>
     61 #if NRND > 0
     62 #include <sys/rnd.h>
     63 #endif
     64 
     65 #include <net/if.h>
     66 #include <net/if_dl.h>
     67 #include <net/if_ether.h>
     68 #include <net/if_media.h>
     69 
     70 #ifdef INET
     71 #include <netinet/in.h>
     72 #include <netinet/if_inarp.h>
     73 #include <netinet/in_systm.h>
     74 #include <netinet/in_var.h>
     75 #include <netinet/ip.h>
     76 #endif
     77 
     78 #ifdef NS
     79 #include <netns/ns.h>
     80 #include <netns/ns_if.h>
     81 #endif
     82 
     83 #if NBPFILTER > 0
     84 #include <net/bpf.h>
     85 #include <net/bpfdesc.h>
     86 #endif
     87 
     88 #include <dev/mii/mii.h>
     89 #include <dev/mii/miivar.h>
     90 
     91 #include <machine/bus.h>
     92 
     93 #include <dev/ic/hmereg.h>
     94 #include <dev/ic/hmevar.h>
     95 
     96 void		hme_start __P((struct ifnet *));
     97 void		hme_stop __P((struct hme_softc *));
     98 int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
     99 void		hme_watchdog __P((struct ifnet *));
    100 void		hme_shutdown __P((void *));
    101 void		hme_init __P((struct hme_softc *));
    102 void		hme_meminit __P((struct hme_softc *));
    103 void		hme_reset __P((struct hme_softc *));
    104 void		hme_setladrf __P((struct hme_softc *));
    105 
    106 /* MII methods & callbacks */
    107 static int	hme_mii_readreg __P((struct device *, int, int));
    108 static void	hme_mii_writereg __P((struct device *, int, int, int));
    109 static void	hme_mii_statchg __P((struct device *));
    110 
    111 int		hme_mediachange __P((struct ifnet *));
    112 void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));
    113 
    114 struct mbuf	*hme_get __P((struct hme_softc *, int, int));
    115 int		hme_put __P((struct hme_softc *, int, struct mbuf *));
    116 void		hme_read __P((struct hme_softc *, int, int));
    117 int		hme_eint __P((struct hme_softc *, u_int));
    118 int		hme_rint __P((struct hme_softc *));
    119 int		hme_tint __P((struct hme_softc *));
    120 
    121 static int	ether_cmp __P((u_char *, u_char *));
    122 
    123 /* Default buffer copy routines */
    124 void	hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
    125 void	hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
    126 void	hme_zerobuf_contig __P((struct hme_softc *, int, int));
    127 
    128 
    129 void
    130 hme_config(sc)
    131 	struct hme_softc *sc;
    132 {
    133 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    134 	struct mii_data *mii = &sc->sc_mii;
    135 	bus_dma_segment_t seg;
    136 	bus_size_t size;
    137 	int rseg, error;
    138 
    139 	/*
    140 	 * HME common initialization.
    141 	 *
    142 	 * hme_softc fields that must be initialized by the front-end:
    143 	 *
    144 	 * the bus tag:
    145 	 *	sc_bustag
    146 	 *
    147 	 * the dma bus tag:
    148 	 *	sc_dmatag
    149 	 *
    150 	 * the bus handles:
    151 	 *	sc_seb		(Shared Ethernet Block registers)
    152 	 *	sc_erx		(Receiver Unit registers)
    153 	 *	sc_etx		(Transmitter Unit registers)
    154 	 *	sc_mac		(MAC registers)
     155 	 *	sc_mif		(Management Interface registers)
    156 	 *
    157 	 * the maximum bus burst size:
    158 	 *	sc_burst
    159 	 *
     160 	 * (not yet: DMA-capable memory for the ring descriptors & packet buffers:
     161 	 *	rb_membase, rb_dmabase)
    162 	 *
    163 	 * the local Ethernet address:
    164 	 *	sc_enaddr
    165 	 *
    166 	 */
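         
         	/*
         	 * Illustrative sketch only (hypothetical front-end, not part of
         	 * this file): an SBus or PCI attach routine would typically do
         	 * something along these lines before calling hme_config():
         	 *
         	 *	sc->sc_bustag = bustag;		(bus space tag from parent)
         	 *	sc->sc_dmatag = dmatag;		(bus dma tag from parent)
         	 *	sc->sc_seb = seb;		(handles from bus_space_map()
         	 *	sc->sc_erx = erx;		 or bus_space_subregion())
         	 *	sc->sc_etx = etx;
         	 *	sc->sc_mac = mac;
         	 *	sc->sc_mif = mif;
         	 *	sc->sc_burst = 16;		(or 32/64, per bus config)
         	 *	bcopy(enaddr, sc->sc_enaddr, 6);  (e.g. from the PROM)
         	 *	hme_config(sc);
         	 */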
    167 
    168 	/* Make sure the chip is stopped. */
    169 	hme_stop(sc);
    170 
    171 
    172 	/*
    173 	 * Allocate descriptors and buffers
     174 	 * XXX - do all this differently, and more configurably;
     175 	 * e.g. use something like `dma_load_mbuf()' on transmit,
     176 	 *     and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
     177 	 *     all the time) on the receiver side.
     178 	 */
    179 #define _HME_NDESC	32
    180 #define _HME_BUFSZ	1536
    181 
    182 	/* Note: the # of descriptors must be a multiple of 16 */
    183 	sc->sc_rb.rb_ntbuf = _HME_NDESC;
    184 	sc->sc_rb.rb_nrbuf = _HME_NDESC;
    185 
    186 	/*
    187 	 * Allocate DMA capable memory
    188 	 * Buffer descriptors must be aligned on a 2048 byte boundary;
    189 	 * take this into account when calculating the size. Note that
    190 	 * the maximum number of descriptors (256) occupies 2048 bytes,
    191 	 * so we allocate that much regardless of _HME_NDESC.
    192 	 */
    193 	size =	2048 +					/* TX descriptors */
    194 		2048 +					/* RX descriptors */
    195 		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
     196 		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */
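         
         	/*
         	 * Rough layout of this allocation, as carved up by hme_meminit()
         	 * below: TX descriptors, then RX descriptors (rounded up to the
         	 * next 2048-byte boundary), then the TX buffers, then the RX
         	 * buffers.
         	 */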
     197 	if ((error = bus_dmamem_alloc(sc->sc_dmatag, size,
     198 				      2048, 0,
     199 				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
     200 		printf("%s: DMA buffer alloc error %d\n",
     201 			sc->sc_dev.dv_xname, error);
         		return;
     202 	}
    203 	sc->sc_rb.rb_dmabase = seg.ds_addr;
    204 
     205 	/* Map DMA memory into CPU-addressable space */
    206 	if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg, size,
    207 				    &sc->sc_rb.rb_membase,
    208 				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    209 		printf("%s: DMA buffer map error %d\n",
    210 			sc->sc_dev.dv_xname, error);
    211 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
    212 		return;
    213 	}
    214 
    215 #if 0
    216 	/*
    217 	 * Install default copy routines if not supplied.
    218 	 */
    219 	if (sc->sc_copytobuf == NULL)
    220 		sc->sc_copytobuf = hme_copytobuf_contig;
    221 
    222 	if (sc->sc_copyfrombuf == NULL)
    223 		sc->sc_copyfrombuf = hme_copyfrombuf_contig;
    224 #endif
    225 
    226 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
    227 
    228 	/* Initialize ifnet structure. */
    229 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
    230 	ifp->if_softc = sc;
    231 	ifp->if_start = hme_start;
    232 	ifp->if_ioctl = hme_ioctl;
    233 	ifp->if_watchdog = hme_watchdog;
    234 	ifp->if_flags =
    235 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
    236 
    237 	/* Initialize ifmedia structures and MII info */
    238 	mii->mii_ifp = ifp;
    239 	mii->mii_readreg = hme_mii_readreg;
    240 	mii->mii_writereg = hme_mii_writereg;
    241 	mii->mii_statchg = hme_mii_statchg;
    242 
    243 	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);
    244 
    245 	mii_phy_probe(&sc->sc_dev, mii, 0xffffffff,
    246 			MII_PHY_ANY, MII_OFFSET_ANY);
    247 
    248 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    249 		/* No PHY attached */
    250 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    251 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
    252 	} else {
    253 		/*
    254 		 * XXX - we can really do the following ONLY if the
    255 		 * phy indeed has the auto negotiation capability!!
    256 		 */
    257 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
    258 	}
    259 
    260 	/* Attach the interface. */
    261 	if_attach(ifp);
    262 	ether_ifattach(ifp, sc->sc_enaddr);
    263 
    264 #if NBPFILTER > 0
    265 	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
    266 #endif
    267 
    268 	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
    269 	if (sc->sc_sh == NULL)
    270 		panic("hme_config: can't establish shutdownhook");
    271 
    272 #if 0
    273 	printf("%s: %d receive buffers, %d transmit buffers\n",
    274 	    sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
    275 	sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
    276 					M_WAITOK);
    277 	sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
    278 					M_WAITOK);
    279 #endif
    280 
    281 #if NRND > 0
    282 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
    283 			  RND_TYPE_NET, 0);
    284 #endif
    285 }
    286 
    287 void
    288 hme_reset(sc)
    289 	struct hme_softc *sc;
    290 {
    291 	int s;
    292 
    293 	s = splnet();
    294 	hme_init(sc);
    295 	splx(s);
    296 }
    297 
    298 void
    299 hme_stop(sc)
    300 	struct hme_softc *sc;
    301 {
    302 	bus_space_tag_t t = sc->sc_bustag;
    303 	bus_space_handle_t seb = sc->sc_seb;
    304 	int n;
    305 
    306 	/* Reset transmitter and receiver */
    307 	bus_space_write_4(t, seb, HME_SEBI_RESET,
    308 			  (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
    309 
    310 	for (n = 0; n < 20; n++) {
    311 		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
    312 		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
    313 			return;
    314 		DELAY(20);
    315 	}
    316 
    317 	printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
    318 }
    319 
    320 void
    321 hme_meminit(sc)
    322 	struct hme_softc *sc;
    323 {
    324 	bus_addr_t txbufdma, rxbufdma;
    325 	bus_addr_t dma;
    326 	caddr_t p;
    327 	unsigned int ntbuf, nrbuf, i;
    328 	struct hme_ring *hr = &sc->sc_rb;
    329 
    330 	p = hr->rb_membase;
    331 	dma = hr->rb_dmabase;
    332 
    333 	ntbuf = hr->rb_ntbuf;
    334 	nrbuf = hr->rb_nrbuf;
    335 
    336 	/*
    337 	 * Allocate transmit descriptors
    338 	 */
    339 	hr->rb_txd = p;
    340 	hr->rb_txddma = dma;
    341 	p += ntbuf * HME_XD_SIZE;
    342 	dma += ntbuf * HME_XD_SIZE;
    343 
    344 	/*
    345 	 * Allocate receive descriptors
    346 	 * Buffer descriptors must be aligned on a 2048 byte boundary.
    347 	 */
    348 	dma = (bus_addr_t)roundup((long)dma, 2048);
    349 	p = (caddr_t)roundup((long)p, 2048);
    350 	hr->rb_rxd = p;
    351 	hr->rb_rxddma = dma;
    352 	p += nrbuf * HME_XD_SIZE;
    353 	dma += nrbuf * HME_XD_SIZE;
    354 
    355 
    356 	/*
    357 	 * Allocate transmit buffers
    358 	 */
    359 	hr->rb_txbuf = p;
    360 	txbufdma = dma;
    361 	p += ntbuf * _HME_BUFSZ;
    362 	dma += ntbuf * _HME_BUFSZ;
    363 
    364 	/*
    365 	 * Allocate receive buffers
    366 	 */
    367 	hr->rb_rxbuf = p;
    368 	rxbufdma = dma;
    369 	p += nrbuf * _HME_BUFSZ;
    370 	dma += nrbuf * _HME_BUFSZ;
    371 
    372 	/*
    373 	 * Initialize transmit buffer descriptors
    374 	 */
    375 	for (i = 0; i < ntbuf; i++) {
    376 		HME_XD_SETADDR(hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
    377 		HME_XD_SETFLAGS(hr->rb_txd, i, 0);
    378 	}
    379 
    380 	/*
    381 	 * Initialize receive buffer descriptors
    382 	 */
    383 	for (i = 0; i < nrbuf; i++) {
    384 		HME_XD_SETADDR(hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
    385 		HME_XD_SETFLAGS(hr->rb_rxd, i,
    386 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    387 	}
    388 
    389 	hr->rb_tdhead = hr->rb_tdtail = 0;
    390 	hr->rb_td_nbusy = 0;
    391 	hr->rb_rdtail = 0;
    392 }
    393 
    394 /*
    395  * Initialization of interface; set up initialization block
    396  * and transmit/receive descriptor rings.
    397  */
    398 void
    399 hme_init(sc)
    400 	struct hme_softc *sc;
    401 {
    402 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    403 	bus_space_tag_t t = sc->sc_bustag;
    404 	bus_space_handle_t seb = sc->sc_seb;
    405 	bus_space_handle_t etx = sc->sc_etx;
    406 	bus_space_handle_t erx = sc->sc_erx;
    407 	bus_space_handle_t mac = sc->sc_mac;
    408 	bus_space_handle_t mif = sc->sc_mif;
    409 	u_int8_t *ea;
    410 	u_int32_t v;
    411 
    412 	/*
    413 	 * Initialization sequence. The numbered steps below correspond
    414 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
    415 	 * Channel Engine manual (part of the PCIO manual).
    416 	 * See also the STP2002-STQ document from Sun Microsystems.
    417 	 */
    418 
    419 	/* step 1 & 2. Reset the Ethernet Channel */
    420 	hme_stop(sc);
    421 
    422 	/* Call MI reset function if any */
    423 	if (sc->sc_hwreset)
    424 		(*sc->sc_hwreset)(sc);
    425 
    426 #if 0
    427 	/* Mask all MIF interrupts, just in case */
    428 	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
    429 #endif
    430 
    431 	/* step 3. Setup data structures in host memory */
    432 	hme_meminit(sc);
    433 
    434 	/* step 4. TX MAC registers & counters */
    435 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    436 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    437 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    438 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    439 
    440 	/* Load station MAC address */
    441 	ea = sc->sc_enaddr;
    442 	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
    443 	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
    444 	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
    445 
    446 	/*
    447 	 * Init seed for backoff
    448 	 * (source suggested by manual: low 10 bits of MAC address)
    449 	 */
    450 	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
    451 	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
    452 
    453 
    454 	/* Note: Accepting power-on default for other MAC registers here.. */
    455 
    456 
    457 	/* step 5. RX MAC registers & counters */
    458 	hme_setladrf(sc);
    459 
    460 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
    461 	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
    462 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
    463 
    464 	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
    465 
    466 
    467 	/* step 8. Global Configuration & Interrupt Mask */
    468 	bus_space_write_4(t, seb, HME_SEBI_IMASK,
    469 			~(
    470 			  /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
    471 			  HME_SEB_STAT_HOSTTOTX |
    472 			  HME_SEB_STAT_RXTOHOST |
    473 			  HME_SEB_STAT_TXALL |
    474 			  HME_SEB_STAT_TXPERR |
    475 			  HME_SEB_STAT_RCNTEXP |
    476 			  HME_SEB_STAT_ALL_ERRORS ));
    477 
    478 	switch (sc->sc_burst) {
    479 	default:
    480 		v = 0;
    481 		break;
    482 	case 16:
    483 		v = HME_SEB_CFG_BURST16;
    484 		break;
    485 	case 32:
    486 		v = HME_SEB_CFG_BURST32;
    487 		break;
    488 	case 64:
    489 		v = HME_SEB_CFG_BURST64;
    490 		break;
    491 	}
    492 	bus_space_write_4(t, seb, HME_SEBI_CFG, v);
    493 
    494 	/* step 9. ETX Configuration: use mostly default values */
    495 
    496 	/* Enable DMA */
    497 	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
    498 	v |= HME_ETX_CFG_DMAENABLE;
    499 	bus_space_write_4(t, etx, HME_ETXI_CFG, v);
    500 
    501 	/* Descriptor ring size: in increments of 16 */
    502 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16);
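         	/* (with _HME_NDESC == 32 as defined above, this writes 32 / 16 == 2) */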
    503 
    504 
    505 	/* step 10. ERX Configuration: use default values; enable DMA */
    506 	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
    507 	v |= HME_ERX_CFG_DMAENABLE;
    508 	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
    509 
    510 	/* step 11. XIF Configuration */
    511 	v = bus_space_read_4(t, mac, HME_MACI_XIF);
    512 	v |= HME_MAC_XIF_OE;
    513 	/* If an external transceiver is connected, disable MII drivers */
    514 	if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
    515 		v |= HME_MAC_XIF_MIIDISAB;
    516 	bus_space_write_4(t, mac, HME_MACI_XIF, v);
    517 
    518 
    519 	/* step 12. RX_MAC Configuration Register */
    520 	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
    521 	v |= HME_MAC_RXCFG_ENABLE;
    522 	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
    523 
    524 	/* step 13. TX_MAC Configuration Register */
    525 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
    526 	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
    527 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
    528 
     529 	/* step 14. Issue Transmit Pending command (done in hme_start()) */
    530 
    531 	/*
    532 	 * Put MIF in frame mode
    533 	 * XXX - do bit-bang mode later
    534 	 */
    535 	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
    536 	v &= ~HME_MIF_CFG_BBMODE;
    537 	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
    538 
    539 	/* Call MI initialization function if any */
    540 	if (sc->sc_hwinit)
    541 		(*sc->sc_hwinit)(sc);
    542 
    543 	ifp->if_flags |= IFF_RUNNING;
    544 	ifp->if_flags &= ~IFF_OACTIVE;
    545 	ifp->if_timer = 0;
    546 	hme_start(ifp);
    547 }
    548 
    549 /*
     550  * Compare two Ether/802 addresses; returns nonzero if they differ (bcmp()
     551  * semantics).  Inlined and unrolled for speed.
    552  */
    553 static __inline__ int
    554 ether_cmp(a, b)
    555 	u_char *a, *b;
    556 {
    557 
     558 	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
     559 	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
     560 		return (1);
     561 	return (0);
    562 }
    563 
    564 
    565 /*
    566  * Routine to copy from mbuf chain to transmit buffer in
    567  * network buffer memory.
    568  * Returns the amount of data copied.
    569  */
    570 int
    571 hme_put(sc, ri, m)
    572 	struct hme_softc *sc;
    573 	int ri;			/* Ring index */
    574 	struct mbuf *m;
    575 {
    576 	struct mbuf *n;
    577 	int len, tlen = 0;
    578 	caddr_t bp;
    579 
    580 	bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
    581 	for (; m; m = n) {
    582 		len = m->m_len;
    583 		if (len == 0) {
    584 			MFREE(m, n);
    585 			continue;
    586 		}
    587 		bcopy(mtod(m, caddr_t), bp, len);
    588 		bp += len;
    589 		tlen += len;
    590 		MFREE(m, n);
    591 	}
    592 	return (tlen);
    593 }
    594 
    595 /*
    596  * Pull data off an interface.
    597  * Len is length of data, with local net header stripped.
    598  * We copy the data into mbufs.  When full cluster sized units are present
    599  * we copy into clusters.
    600  */
    601 struct mbuf *
    602 hme_get(sc, ri, totlen)
    603 	struct hme_softc *sc;
    604 	int ri, totlen;
    605 {
    606 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    607 	struct mbuf *m, *m0, *newm;
    608 	caddr_t bp;
    609 	int len;
    610 
    611 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
    612 	if (m0 == 0)
    613 		return (0);
    614 	m0->m_pkthdr.rcvif = ifp;
    615 	m0->m_pkthdr.len = totlen;
    616 	len = MHLEN;
    617 	m = m0;
    618 
    619 	bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
    620 
    621 	while (totlen > 0) {
    622 		if (totlen >= MINCLSIZE) {
    623 			MCLGET(m, M_DONTWAIT);
    624 			if ((m->m_flags & M_EXT) == 0)
    625 				goto bad;
    626 			len = MCLBYTES;
    627 		}
    628 
    629 		if (m == m0) {
    630 			caddr_t newdata = (caddr_t)
    631 			    ALIGN(m->m_data + sizeof(struct ether_header)) -
    632 			    sizeof(struct ether_header);
    633 			len -= newdata - m->m_data;
    634 			m->m_data = newdata;
    635 		}
    636 
    637 		m->m_len = len = min(totlen, len);
    638 		bcopy(bp, mtod(m, caddr_t), len);
    639 		bp += len;
    640 
    641 		totlen -= len;
    642 		if (totlen > 0) {
    643 			MGET(newm, M_DONTWAIT, MT_DATA);
    644 			if (newm == 0)
    645 				goto bad;
    646 			len = MLEN;
    647 			m = m->m_next = newm;
    648 		}
    649 	}
    650 
    651 	return (m0);
    652 
    653 bad:
    654 	m_freem(m0);
    655 	return (0);
    656 }
    657 
    658 /*
    659  * Pass a packet to the higher levels.
    660  */
    661 void
    662 hme_read(sc, ix, len)
    663 	struct hme_softc *sc;
    664 	int ix, len;
    665 {
    666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    667 	struct mbuf *m;
    668 
    669 	if (len <= sizeof(struct ether_header) ||
    670 	    len > ETHERMTU + sizeof(struct ether_header)) {
    671 #ifdef HMEDEBUG
    672 		printf("%s: invalid packet size %d; dropping\n",
    673 		    sc->sc_dev.dv_xname, len);
    674 #endif
    675 		ifp->if_ierrors++;
    676 		return;
    677 	}
    678 
    679 	/* Pull packet off interface. */
    680 	m = hme_get(sc, ix, len);
    681 	if (m == 0) {
    682 		ifp->if_ierrors++;
    683 		return;
    684 	}
    685 
    686 	ifp->if_ipackets++;
    687 
    688 #if NBPFILTER > 0
    689 	/*
    690 	 * Check if there's a BPF listener on this interface.
    691 	 * If so, hand off the raw packet to BPF.
    692 	 */
    693 	if (ifp->if_bpf) {
    694 		struct ether_header *eh;
    695 
    696 		bpf_mtap(ifp->if_bpf, m);
    697 
    698 		/*
    699 		 * Note that the interface cannot be in promiscuous mode if
    700 		 * there are no BPF listeners.  And if we are in promiscuous
    701 		 * mode, we have to check if this packet is really ours.
    702 		 */
    703 
    704 		/* We assume that the header fit entirely in one mbuf. */
    705 		eh = mtod(m, struct ether_header *);
    706 
    707 		if ((ifp->if_flags & IFF_PROMISC) != 0 &&
    708 		    (eh->ether_dhost[0] & 1) == 0 && /* !mcast and !bcast */
    709 		    ether_cmp(eh->ether_dhost, sc->sc_enaddr)) {
    710 			m_freem(m);
    711 			return;
    712 		}
    713 	}
    714 #endif
    715 
    716 	/* Pass the packet up. */
    717 	(*ifp->if_input)(ifp, m);
    718 }
    719 
    720 void
    721 hme_start(ifp)
    722 	struct ifnet *ifp;
    723 {
    724 	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
    725 	caddr_t txd = sc->sc_rb.rb_txd;
    726 	struct mbuf *m;
    727 	unsigned int ri, len;
    728 	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
    729 
    730 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
    731 		return;
    732 
    733 	ri = sc->sc_rb.rb_tdhead;
    734 
    735 	for (;;) {
    736 		IF_DEQUEUE(&ifp->if_snd, m);
    737 		if (m == 0)
    738 			break;
    739 
    740 #if NBPFILTER > 0
    741 		/*
    742 		 * If BPF is listening on this interface, let it see the
    743 		 * packet before we commit it to the wire.
    744 		 */
    745 		if (ifp->if_bpf)
    746 			bpf_mtap(ifp->if_bpf, m);
    747 #endif
    748 
    749 		/*
    750 		 * Copy the mbuf chain into the transmit buffer.
    751 		 */
    752 		len = hme_put(sc, ri, m);
    753 
    754 		/*
    755 		 * Initialize transmit registers and start transmission
    756 		 */
    757 		HME_XD_SETFLAGS(txd, ri,
    758 			HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
    759 			HME_XD_ENCODE_TSIZE(len));
    760 
    761 		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
    762 				  HME_ETX_TP_DMAWAKEUP);
    763 
    764 		if (++ri == ntbuf)
    765 			ri = 0;
    766 
    767 		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
    768 			ifp->if_flags |= IFF_OACTIVE;
    769 			break;
    770 		}
    771 	}
    772 
    773 	sc->sc_rb.rb_tdhead = ri;
    774 }
    775 
    776 /*
    777  * Transmit interrupt.
    778  */
    779 int
    780 hme_tint(sc)
    781 	struct hme_softc *sc;
    782 {
    783 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    784 	bus_space_tag_t t = sc->sc_bustag;
    785 	bus_space_handle_t mac = sc->sc_mac;
    786 	unsigned int ri, txflags;
    787 
    788 	/*
    789 	 * Unload collision counters
    790 	 */
    791 	ifp->if_collisions +=
    792 		bus_space_read_4(t, mac, HME_MACI_NCCNT) +
    793 		bus_space_read_4(t, mac, HME_MACI_FCCNT) +
    794 		bus_space_read_4(t, mac, HME_MACI_EXCNT) +
    795 		bus_space_read_4(t, mac, HME_MACI_LTCNT);
    796 
    797 	/*
    798 	 * then clear the hardware counters.
    799 	 */
    800 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    801 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    802 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    803 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    804 
    805 	/* Fetch current position in the transmit ring */
    806 	ri = sc->sc_rb.rb_tdtail;
    807 
    808 	for (;;) {
    809 		if (sc->sc_rb.rb_td_nbusy <= 0)
    810 			break;
    811 
    812 		txflags = HME_XD_GETFLAGS(sc->sc_rb.rb_txd, ri);
    813 
    814 		if (txflags & HME_XD_OWN)
    815 			break;
    816 
    817 		ifp->if_flags &= ~IFF_OACTIVE;
    818 		ifp->if_opackets++;
    819 
     820 		if (++ri == sc->sc_rb.rb_ntbuf)
    821 			ri = 0;
    822 
    823 		--sc->sc_rb.rb_td_nbusy;
    824 	}
    825 
    826 	sc->sc_rb.rb_tdtail = ri;
    827 
    828 	hme_start(ifp);
    829 
    830 	if (sc->sc_rb.rb_td_nbusy == 0)
    831 		ifp->if_timer = 0;
    832 
    833 	return (1);
    834 }
    835 
    836 /*
    837  * Receive interrupt.
    838  */
    839 int
    840 hme_rint(sc)
    841 	struct hme_softc *sc;
    842 {
    843 	caddr_t xdr = sc->sc_rb.rb_rxd;
    844 	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
    845 	unsigned int ri, len;
    846 	u_int32_t flags;
    847 
    848 	ri = sc->sc_rb.rb_rdtail;
    849 
    850 	/*
    851 	 * Process all buffers with valid data.
    852 	 */
    853 	for (;;) {
    854 		flags = HME_XD_GETFLAGS(xdr, ri);
    855 		if (flags & HME_XD_OWN)
    856 			break;
    857 
    858 		len = HME_XD_DECODE_RSIZE(flags);
    859 		hme_read(sc, ri, len);
    860 
    861 		/* This buffer can be used by the hardware again */
    862 		HME_XD_SETFLAGS(xdr, ri,
    863 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    864 
    865 		if (++ri == nrbuf)
    866 			ri = 0;
    867 	}
    868 
    869 	sc->sc_rb.rb_rdtail = ri;
    870 
    871 	return (1);
    872 }
    873 
    874 int
    875 hme_eint(sc, status)
    876 	struct hme_softc *sc;
    877 	u_int status;
    878 {
    879 	char bits[128];
    880 
    881 	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
    882 		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
    883 		return (1);
    884 	}
    885 
    886 	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
    887 		bitmask_snprintf(status, HME_SEB_STAT_BITS, bits,sizeof(bits)));
    888 	return (1);
    889 }
    890 
    891 int
    892 hme_intr(v)
    893 	void *v;
    894 {
    895 	struct hme_softc *sc = (struct hme_softc *)v;
    896 	bus_space_tag_t t = sc->sc_bustag;
    897 	bus_space_handle_t seb = sc->sc_seb;
    898 	u_int32_t status;
    899 	int r = 0;
    900 
    901 	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
    902 
    903 	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
    904 		r |= hme_eint(sc, status);
    905 
    906 	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
    907 		r |= hme_tint(sc);
    908 
    909 	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
    910 		r |= hme_rint(sc);
    911 
    912 	return (r);
    913 }
    914 
    915 
    916 void
    917 hme_watchdog(ifp)
    918 	struct ifnet *ifp;
    919 {
    920 	struct hme_softc *sc = ifp->if_softc;
    921 
    922 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
    923 	++ifp->if_oerrors;
    924 
    925 	hme_reset(sc);
    926 }
    927 
    928 /*
    929  * MII interface
    930  */
    931 static int
    932 hme_mii_readreg(self, phy, reg)
    933 	struct device *self;
    934 	int phy, reg;
    935 {
    936 	struct hme_softc *sc = (void *)self;
    937 	bus_space_tag_t t = sc->sc_bustag;
    938 	bus_space_handle_t mif = sc->sc_mif;
    939 	int n;
    940 	u_int32_t v;
    941 
    942 	/* Construct the frame command */
    943 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
    944 	    HME_MIF_FO_TAMSB |
    945 	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
    946 	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
    947 	    (reg << HME_MIF_FO_REGAD_SHIFT);
    948 
    949 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
    950 	for (n = 0; n < 100; n++) {
    951 		DELAY(1);
    952 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
    953 		if (v & HME_MIF_FO_TALSB)
    954 			return (v & HME_MIF_FO_DATA);
    955 	}
    956 
    957 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
    958 	return (0);
    959 }
    960 
    961 static void
    962 hme_mii_writereg(self, phy, reg, val)
    963 	struct device *self;
    964 	int phy, reg, val;
    965 {
    966 	struct hme_softc *sc = (void *)self;
    967 	bus_space_tag_t t = sc->sc_bustag;
    968 	bus_space_handle_t mif = sc->sc_mif;
    969 	int n;
    970 	u_int32_t v;
    971 
    972 	/* Construct the frame command */
    973 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
    974 	    HME_MIF_FO_TAMSB				|
    975 	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
    976 	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
    977 	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
    978 	    (val & HME_MIF_FO_DATA);
    979 
    980 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
    981 	for (n = 0; n < 100; n++) {
    982 		DELAY(1);
    983 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
    984 		if (v & HME_MIF_FO_TALSB)
    985 			return;
    986 	}
    987 
    988 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
    989 }
    990 
    991 static void
    992 hme_mii_statchg(dev)
    993 	struct device *dev;
    994 {
    995 }
    996 
    997 int
    998 hme_mediachange(ifp)
    999 	struct ifnet *ifp;
   1000 {
   1001 	struct hme_softc *sc = ifp->if_softc;
   1002 	struct ifmedia *ifm = &sc->sc_media;
   1003 	int newmedia = ifm->ifm_media;
   1004 	bus_space_tag_t t = sc->sc_bustag;
   1005 	bus_space_handle_t mac = sc->sc_mac;
   1006 	u_int32_t v;
   1007 	int error;
   1008 
   1009 	if (IFM_TYPE(newmedia) != IFM_ETHER)
   1010 		return (EINVAL);
   1011 
   1012 	if ((ifp->if_flags & IFF_UP) == 0)
   1013 		return (0);
   1014 
   1015 	if ((error = mii_mediachg(&sc->sc_mii)) != 0)
   1016 		return (error);
   1017 
   1018 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
   1019 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
   1020 		v |= HME_MAC_TXCFG_FULLDPLX;
   1021 	else
   1022 		v &= ~HME_MAC_TXCFG_FULLDPLX;
   1023 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
   1024 
   1025 	return (0);
   1026 }
   1027 
   1028 void
   1029 hme_mediastatus(ifp, ifmr)
   1030 	struct ifnet *ifp;
   1031 	struct ifmediareq *ifmr;
   1032 {
   1033 	struct hme_softc *sc = ifp->if_softc;
   1034 
   1035 	if ((ifp->if_flags & IFF_UP) == 0)
   1036 		return;
   1037 
   1038 	mii_pollstat(&sc->sc_mii);
   1039 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   1040 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   1041 }
   1042 
   1043 /*
   1044  * Process an ioctl request.
   1045  */
   1046 int
   1047 hme_ioctl(ifp, cmd, data)
   1048 	struct ifnet *ifp;
   1049 	u_long cmd;
   1050 	caddr_t data;
   1051 {
   1052 	struct hme_softc *sc = ifp->if_softc;
   1053 	struct ifaddr *ifa = (struct ifaddr *)data;
   1054 	struct ifreq *ifr = (struct ifreq *)data;
   1055 	int s, error = 0;
   1056 
   1057 	s = splnet();
   1058 
   1059 	switch (cmd) {
   1060 
   1061 	case SIOCSIFADDR:
   1062 		ifp->if_flags |= IFF_UP;
   1063 
   1064 		switch (ifa->ifa_addr->sa_family) {
   1065 #ifdef INET
   1066 		case AF_INET:
   1067 			hme_init(sc);
   1068 			arp_ifinit(ifp, ifa);
   1069 			break;
   1070 #endif
   1071 #ifdef NS
   1072 		case AF_NS:
   1073 		    {
   1074 			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
   1075 
   1076 			if (ns_nullhost(*ina))
   1077 				ina->x_host =
   1078 				    *(union ns_host *)LLADDR(ifp->if_sadl);
   1079 			else {
   1080 				bcopy(ina->x_host.c_host,
   1081 				    LLADDR(ifp->if_sadl),
   1082 				    sizeof(sc->sc_enaddr));
   1083 			}
   1084 			/* Set new address. */
   1085 			hme_init(sc);
   1086 			break;
   1087 		    }
   1088 #endif
   1089 		default:
   1090 			hme_init(sc);
   1091 			break;
   1092 		}
   1093 		break;
   1094 
   1095 	case SIOCSIFFLAGS:
   1096 		if ((ifp->if_flags & IFF_UP) == 0 &&
   1097 		    (ifp->if_flags & IFF_RUNNING) != 0) {
   1098 			/*
   1099 			 * If interface is marked down and it is running, then
   1100 			 * stop it.
   1101 			 */
   1102 			hme_stop(sc);
   1103 			ifp->if_flags &= ~IFF_RUNNING;
   1104 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
   1105 		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
   1106 			/*
   1107 			 * If interface is marked up and it is stopped, then
   1108 			 * start it.
   1109 			 */
   1110 			hme_init(sc);
   1111 		} else if ((ifp->if_flags & IFF_UP) != 0) {
   1112 			/*
   1113 			 * Reset the interface to pick up changes in any other
   1114 			 * flags that affect hardware registers.
   1115 			 */
   1116 			/*hme_stop(sc);*/
   1117 			hme_init(sc);
   1118 		}
   1119 #ifdef HMEDEBUG
   1120 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
   1121 #endif
   1122 		break;
   1123 
   1124 	case SIOCADDMULTI:
   1125 	case SIOCDELMULTI:
   1126 		error = (cmd == SIOCADDMULTI) ?
   1127 		    ether_addmulti(ifr, &sc->sc_ethercom) :
   1128 		    ether_delmulti(ifr, &sc->sc_ethercom);
   1129 
   1130 		if (error == ENETRESET) {
   1131 			/*
   1132 			 * Multicast list has changed; set the hardware filter
   1133 			 * accordingly.
   1134 			 */
   1135 			hme_setladrf(sc);
   1136 			error = 0;
   1137 		}
   1138 		break;
   1139 
   1140 	case SIOCGIFMEDIA:
   1141 	case SIOCSIFMEDIA:
   1142 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
   1143 		break;
   1144 
   1145 	default:
   1146 		error = EINVAL;
   1147 		break;
   1148 	}
   1149 
   1150 	splx(s);
   1151 	return (error);
   1152 }
   1153 
   1154 void
   1155 hme_shutdown(arg)
   1156 	void *arg;
   1157 {
   1158 
   1159 	hme_stop((struct hme_softc *)arg);
   1160 }
   1161 
   1162 /*
   1163  * Set up the logical address filter.
   1164  */
   1165 void
   1166 hme_setladrf(sc)
   1167 	struct hme_softc *sc;
   1168 {
   1169 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1170 	struct ether_multi *enm;
   1171 	struct ether_multistep step;
   1172 	struct ethercom *ec = &sc->sc_ethercom;
   1173 	bus_space_tag_t t = sc->sc_bustag;
   1174 	bus_space_handle_t mac = sc->sc_mac;
   1175 	u_char *cp;
   1176 	u_int32_t crc;
   1177 	u_int32_t hash[4];
   1178 	int len;
   1179 
   1180 	/*
   1181 	 * Set up multicast address filter by passing all multicast addresses
   1182 	 * through a crc generator, and then using the high order 6 bits as an
    1183 	 * index into the 64 bit logical address filter.  The two high-order
    1184 	 * bits select the word, while the remaining four bits select the bit
    1185 	 * within the word.
   1186 	 */
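         
         	/*
         	 * Worked example (illustrative value only): if the top six CRC
         	 * bits come out as 0x2b (binary 101011), then crc >> 4 == 2
         	 * selects hash[2] and crc & 0xf == 0xb selects bit 11, i.e.
         	 * hash[2] |= 1 << 11.
         	 */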
   1187 
   1188 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
   1189 		u_int32_t v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
   1190 		v |= HME_MAC_RXCFG_PMISC;
   1191 		bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
   1192 		goto allmulti;
   1193 	}
   1194 
   1195 	/* Clear hash table */
   1196 	hash[3] = hash[2] = hash[1] = hash[0] = 0;
   1197 	ETHER_FIRST_MULTI(step, ec, enm);
   1198 	while (enm != NULL) {
   1199 		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
   1200 			/*
   1201 			 * We must listen to a range of multicast addresses.
   1202 			 * For now, just accept all multicasts, rather than
   1203 			 * trying to set only those filter bits needed to match
   1204 			 * the range.  (At this time, the only use of address
   1205 			 * ranges is for IP multicast routing, for which the
   1206 			 * range is big enough to require all bits set.)
   1207 			 */
   1208 			goto allmulti;
   1209 		}
   1210 
   1211 		cp = enm->enm_addrlo;
   1212 		crc = 0xffffffff;
   1213 		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
   1214 			int octet = *cp++;
   1215 			int i;
   1216 
   1217 #define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
   1218 			for (i = 0; i < 8; i++) {
   1219 				if ((crc & 1) ^ (octet & 1)) {
   1220 					crc >>= 1;
   1221 					crc ^= MC_POLY_LE;
   1222 				} else {
   1223 					crc >>= 1;
   1224 				}
   1225 				octet >>= 1;
   1226 			}
   1227 		}
   1228 		/* Just want the 6 most significant bits. */
   1229 		crc >>= 26;
   1230 
   1231 		/* Set the corresponding bit in the filter. */
   1232 		hash[crc >> 4] |= 1 << (crc & 0xf);
   1233 
   1234 		ETHER_NEXT_MULTI(step, enm);
   1235 	}
   1236 
   1237 	/* Now load the hash table onto the chip */
   1238 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
   1239 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
   1240 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
   1241 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
   1242 
   1243 	ifp->if_flags &= ~IFF_ALLMULTI;
   1244 	return;
   1245 
   1246 allmulti:
   1247 	ifp->if_flags |= IFF_ALLMULTI;
   1248 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, 0xffff);
   1249 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, 0xffff);
   1250 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, 0xffff);
   1251 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, 0xffff);
   1252 }
   1253 
   1254 /*
   1255  * Routines for accessing the transmit and receive buffers.
   1256  * The various CPU and adapter configurations supported by this
   1257  * driver require three different access methods for buffers
   1258  * and descriptors:
   1259  *	(1) contig (contiguous data; no padding),
   1260  *	(2) gap2 (two bytes of data followed by two bytes of padding),
   1261  *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
   1262  */
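         
         /*
          * Illustrative sketch only (no gap2/gap16 routine exists in this file):
          * under the assumed gap2 layout, a copy-to-buffer loop would move each
          * 16-bit word of data and then skip the two bytes of padding, e.g.
          *
          *	for (; len >= 2; len -= 2, from += 2, buf += 4)
          *		*(u_int16_t *)buf = *(u_int16_t *)from;
          *
          * A gap16 variant would likewise copy 16 bytes of data and then skip
          * the following 16 bytes of padding.
          */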
   1263 
   1264 #if 0
   1265 /*
   1266  * contig: contiguous data with no padding.
   1267  *
   1268  * Buffers may have any alignment.
   1269  */
   1270 
   1271 void
   1272 hme_copytobuf_contig(sc, from, ri, len)
   1273 	struct hme_softc *sc;
   1274 	void *from;
   1275 	int ri, len;
   1276 {
   1277 	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);
   1278 
   1279 	/*
   1280 	 * Just call bcopy() to do the work.
   1281 	 */
   1282 	bcopy(from, buf, len);
   1283 }
   1284 
   1285 void
    1286 hme_copyfrombuf_contig(sc, to, ri, len)
    1287 	struct hme_softc *sc;
    1288 	void *to;
    1289 	int ri, len;
    1290 {
    1291 	volatile caddr_t buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);
   1292 
   1293 	/*
   1294 	 * Just call bcopy() to do the work.
   1295 	 */
   1296 	bcopy(buf, to, len);
   1297 }
   1298 #endif
   1299