      1 /*	$NetBSD: hme.c,v 1.1 1999/06/27 12:26:32 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Paul Kranenburg.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * HME Ethernet module driver.
     41  */
     42 
     43 #define HMEDEBUG
     44 
     45 #include "opt_inet.h"
     46 #include "opt_ccitt.h"
     47 #include "opt_llc.h"
     48 #include "opt_ns.h"
     49 #include "bpfilter.h"
     50 #include "rnd.h"
     51 
     52 #include <sys/param.h>
     53 #include <sys/systm.h>
     54 #include <sys/mbuf.h>
     55 #include <sys/syslog.h>
     56 #include <sys/socket.h>
     57 #include <sys/device.h>
     58 #include <sys/malloc.h>
     59 #include <sys/ioctl.h>
     60 #include <sys/errno.h>
     61 #if NRND > 0
     62 #include <sys/rnd.h>
     63 #endif
     64 
     65 #include <net/if.h>
     66 #include <net/if_dl.h>
     67 #include <net/if_ether.h>
     68 #include <net/if_media.h>
     69 
     70 #ifdef INET
     71 #include <netinet/in.h>
     72 #include <netinet/if_inarp.h>
     73 #include <netinet/in_systm.h>
     74 #include <netinet/in_var.h>
     75 #include <netinet/ip.h>
     76 #endif
     77 
     78 #ifdef NS
     79 #include <netns/ns.h>
     80 #include <netns/ns_if.h>
     81 #endif
     82 
     83 #if NBPFILTER > 0
     84 #include <net/bpf.h>
     85 #include <net/bpfdesc.h>
     86 #endif
     87 
     88 #include <dev/mii/mii.h>
     89 #include <dev/mii/miivar.h>
     90 
     91 #include <machine/bus.h>
     92 
     93 #include <dev/ic/hmereg.h>
     94 #include <dev/ic/hmevar.h>
     95 
     96 void		hme_start __P((struct ifnet *));
     97 void		hme_stop __P((struct hme_softc *));
     98 int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
     99 void		hme_watchdog __P((struct ifnet *));
    100 void		hme_shutdown __P((void *));
    101 void		hme_init __P((struct hme_softc *));
    102 void		hme_meminit __P((struct hme_softc *));
    103 void		hme_reset __P((struct hme_softc *));
    104 void		hme_setladrf __P((struct hme_softc *));
    105 
    106 /* MII methods & callbacks */
    107 static int	hme_mii_readreg __P((struct device *, int, int));
    108 static void	hme_mii_writereg __P((struct device *, int, int, int));
    109 static void	hme_mii_statchg __P((struct device *));
    110 
    111 int		hme_mediachange __P((struct ifnet *));
    112 void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));
    113 
    114 struct mbuf	*hme_get __P((struct hme_softc *, int, int));
    115 int		hme_put __P((struct hme_softc *, int, struct mbuf *));
    116 void		hme_read __P((struct hme_softc *, int, int));
    117 int		hme_eint __P((struct hme_softc *, u_int));
    118 int		hme_rint __P((struct hme_softc *));
    119 int		hme_tint __P((struct hme_softc *));
    120 
    121 static int	ether_cmp __P((u_char *, u_char *));
    122 
    123 /* Default buffer copy routines */
    124 void	hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
    125 void	hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
    126 void	hme_zerobuf_contig __P((struct hme_softc *, int, int));
    127 
    128 
    129 void
    130 hme_config(sc)
    131 	struct hme_softc *sc;
    132 {
    133 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    134 	struct mii_data *mii = &sc->sc_mii;
    135 	bus_dma_segment_t seg;
    136 	bus_size_t size;
    137 	int rseg, error;
    138 
    139 	/*
    140 	 * HME common initialization.
    141 	 *
    142 	 * hme_softc fields that must be initialized by the front-end:
    143 	 *
    144 	 * the bus tag:
    145 	 *	sc_bustag
    146 	 *
    147 	 * the dma bus tag:
    148 	 *	sc_dmatag
    149 	 *
    150 	 * the bus handles:
    151 	 *	sc_seb		(Shared Ethernet Block registers)
    152 	 *	sc_erx		(Receiver Unit registers)
    153 	 *	sc_etx		(Transmitter Unit registers)
    154 	 *	sc_mac		(MAC registers)
     155 	 *	sc_mif		(Management Interface registers)
    156 	 *
    157 	 * the maximum bus burst size:
    158 	 *	sc_burst
    159 	 *
    160 	 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
    161 	 *	rb_membase, rb_dmabase)
    162 	 *
    163 	 * the local Ethernet address:
    164 	 *	sc_enaddr
    165 	 *
    166 	 */
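         	/*
         	 * Illustrative sketch only (not part of this driver): a
         	 * hypothetical bus front-end might fill in the fields listed
         	 * above before calling hme_config().  The `hsc', `sa' and
         	 * *_handle names below are invented for the example; only the
         	 * sc_* members come from the contract above.
         	 *
         	 *	struct hme_softc *sc = &hsc->hsc_hme;
         	 *
         	 *	sc->sc_bustag = sa->sa_bustag;
         	 *	sc->sc_dmatag = sa->sa_dmatag;
         	 *	sc->sc_seb = seb_handle;	(Shared Ethernet Block)
         	 *	sc->sc_erx = erx_handle;	(Receiver Unit)
         	 *	sc->sc_etx = etx_handle;	(Transmitter Unit)
         	 *	sc->sc_mac = mac_handle;	(MAC registers)
         	 *	sc->sc_mif = mif_handle;	(Management Interface)
         	 *	sc->sc_burst = 64;		(max bus burst size)
         	 *	bcopy(rom_enaddr, sc->sc_enaddr, ETHER_ADDR_LEN);
         	 *
         	 *	hme_config(sc);
         	 */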
    167 
    168 	/* Make sure the chip is stopped. */
    169 	hme_stop(sc);
    170 
    171 
    172 	/*
    173 	 * Allocate descriptors and buffers
     174 	 * XXX - do all this differently.. and more configurably,
     175 	 * e.g. use things like `dma_load_mbuf()' on transmit,
     176 	 *     and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
     177 	 *     all the time) on the receiver side.
    178 	 */
    179 #define _HME_NDESC	32
     180 #define _HME_BUFSZ	1600	/* must hold a maximum-sized Ethernet frame */
    181 
    182 	/* Note: the # of descriptors must be a multiple of 16 */
    183 	sc->sc_rb.rb_ntbuf = _HME_NDESC;
    184 	sc->sc_rb.rb_nrbuf = _HME_NDESC;
    185 
    186 	/*
    187 	 * Allocate DMA capable memory
    188 	 * Buffer descriptors must be aligned on a 2048 byte boundary;
    189 	 * take this into account when calculating the size. Note that
    190 	 * the maximum number of descriptors (256) occupies 2048 bytes,
    191 	 * so we allocate that much regardless of _HME_NDESC.
    192 	 */
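         	/*
         	 * Worked example with the defaults above (illustrative only):
         	 * 32 descriptors of 8 bytes (HME_XD_SIZE) each occupy just
         	 * 256 bytes per ring, but a full 2048 bytes is reserved for
         	 * each ring to satisfy the alignment constraint and the
         	 * maximum ring size.
         	 */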
    193 	size =	2048 +					/* TX descriptors */
    194 		2048 +					/* RX descriptors */
    195 		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
     196 		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */
    197 	if ((error = bus_dmamem_alloc(sc->sc_dmatag, size,
    198 				      2048, 0,
    199 				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
    200 		printf("%s: DMA buffer alloc error %d\n",
     201 			sc->sc_dev.dv_xname, error);
         		return;
     202 	}
    203 	sc->sc_rb.rb_dmabase = seg.ds_addr;
    204 
     205 	/* Map DMA memory in CPU addressable space */
    206 	if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg, size,
    207 				    &sc->sc_rb.rb_membase,
    208 				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
    209 		printf("%s: DMA buffer map error %d\n",
    210 			sc->sc_dev.dv_xname, error);
    211 		bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
    212 		return;
    213 	}
    214 
    215 #if 0
    216 	/*
    217 	 * Install default copy routines if not supplied.
    218 	 */
    219 	if (sc->sc_copytobuf == NULL)
    220 		sc->sc_copytobuf = hme_copytobuf_contig;
    221 
    222 	if (sc->sc_copyfrombuf == NULL)
    223 		sc->sc_copyfrombuf = hme_copyfrombuf_contig;
    224 #endif
    225 
    226 	/* Initialize ifnet structure. */
    227 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
    228 	ifp->if_softc = sc;
    229 	ifp->if_start = hme_start;
    230 	ifp->if_ioctl = hme_ioctl;
    231 	ifp->if_watchdog = hme_watchdog;
    232 	ifp->if_flags =
    233 	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
    234 
    235 	/* Initialize ifmedia structures and MII info */
    236 	mii->mii_ifp = ifp;
    237 	mii->mii_readreg = hme_mii_readreg;
    238 	mii->mii_writereg = hme_mii_writereg;
    239 	mii->mii_statchg = hme_mii_statchg;
    240 
    241 	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);
    242 
    243 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
    244 		/* No PHY attached */
    245 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
    246 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
    247 	} else {
    248 		/*
    249 		 * XXX - we can really do the following ONLY if the
    250 		 * phy indeed has the auto negotiation capability!!
    251 		 */
    252 		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
    253 	}
    254 
    255 	/* Attach the interface. */
    256 	if_attach(ifp);
    257 	ether_ifattach(ifp, sc->sc_enaddr);
    258 
    259 #if NBPFILTER > 0
    260 	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
    261 #endif
    262 
    263 	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
    264 
    265 	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
    266 	if (sc->sc_sh == NULL)
    267 		panic("hme_config: can't establish shutdownhook");
    268 
    269 #if 0
    270 	printf("%s: %d receive buffers, %d transmit buffers\n",
    271 	    sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
    272 	sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
    273 					M_WAITOK);
    274 	sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
    275 					M_WAITOK);
    276 #endif
    277 
    278 #if NRND > 0
    279 	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
    280 			  RND_TYPE_NET, 0);
    281 #endif
    282 }
    283 
    284 void
    285 hme_reset(sc)
    286 	struct hme_softc *sc;
    287 {
    288 	int s;
    289 
    290 	s = splnet();
    291 	hme_init(sc);
    292 	splx(s);
    293 }
    294 
    295 void
    296 hme_stop(sc)
    297 	struct hme_softc *sc;
    298 {
    299 	bus_space_tag_t t = sc->sc_bustag;
    300 	bus_space_handle_t seb = sc->sc_seb;
    301 	int n;
    302 
    303 	/* Reset transmitter and receiver */
    304 	bus_space_write_4(t, seb, HME_SEBI_RESET,
    305 			  (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
    306 
    307 	for (n = 0; n < 20; n++) {
    308 		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
    309 		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
    310 			return;
    311 		DELAY(20);
    312 	}
    313 
    314 	printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
    315 }
    316 
    317 void
    318 hme_meminit(sc)
    319 	struct hme_softc *sc;
    320 {
    321 	bus_addr_t txbufdma, rxbufdma;
    322 	bus_addr_t dma;
    323 	caddr_t p;
    324 	unsigned int ntbuf, nrbuf, i;
    325 	struct hme_ring *hr = &sc->sc_rb;
    326 
    327 	p = hr->rb_membase;
    328 	dma = hr->rb_dmabase;
    329 
    330 	ntbuf = hr->rb_ntbuf;
    331 	nrbuf = hr->rb_nrbuf;
    332 
    333 	/*
    334 	 * Allocate transmit descriptors
    335 	 */
    336 	hr->rb_txd = p;
    337 	hr->rb_txddma = dma;
    338 	p += ntbuf * HME_XD_SIZE;
    339 	dma += ntbuf * HME_XD_SIZE;
    340 
    341 	/*
    342 	 * Allocate receive descriptors
    343 	 * Buffer descriptors must be aligned on a 2048 byte boundary.
    344 	 */
    345 	dma = (bus_addr_t)roundup((long)dma, 2048);
    346 	p = (caddr_t)roundup((long)p, 2048);
    347 	hr->rb_rxd = p;
    348 	hr->rb_rxddma = dma;
    349 	p += nrbuf * HME_XD_SIZE;
    350 	dma += nrbuf * HME_XD_SIZE;
    351 
    352 
    353 	/*
    354 	 * Allocate transmit buffers
    355 	 */
    356 	hr->rb_txbuf = p;
    357 	txbufdma = dma;
    358 	p += ntbuf * _HME_BUFSZ;
    359 	dma += ntbuf * _HME_BUFSZ;
    360 
    361 	/*
    362 	 * Allocate receive buffers
    363 	 */
    364 	hr->rb_rxbuf = p;
    365 	rxbufdma = dma;
    366 	p += nrbuf * _HME_BUFSZ;
    367 	dma += nrbuf * _HME_BUFSZ;
    368 
    369 	/*
    370 	 * Initialize transmit buffer descriptors
    371 	 */
    372 	for (i = 0; i < ntbuf; i++) {
    373 		HME_XD_SETADDR(hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
    374 		HME_XD_SETFLAGS(hr->rb_txd, i, 0);
    375 	}
    376 
    377 	/*
    378 	 * Initialize receive buffer descriptors
    379 	 */
    380 	for (i = 0; i < nrbuf; i++) {
     381 		HME_XD_SETADDR(hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
     382 		HME_XD_SETFLAGS(hr->rb_rxd, i,
    383 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    384 	}
    385 
    386 	hr->rb_tdhead = hr->rb_tdtail = 0;
    387 	hr->rb_td_nbusy = 0;
    388 	hr->rb_rdtail = 0;
    389 }
    390 
    391 /*
    392  * Initialization of interface; set up initialization block
    393  * and transmit/receive descriptor rings.
    394  */
    395 void
    396 hme_init(sc)
    397 	struct hme_softc *sc;
    398 {
    399 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    400 	bus_space_tag_t t = sc->sc_bustag;
    401 	bus_space_handle_t seb = sc->sc_seb;
    402 	bus_space_handle_t etx = sc->sc_etx;
    403 	bus_space_handle_t erx = sc->sc_erx;
    404 	bus_space_handle_t mac = sc->sc_mac;
    405 	bus_space_handle_t mif = sc->sc_mif;
    406 	u_int8_t *ea;
    407 	u_int32_t v;
    408 
    409 	/*
    410 	 * Initialization sequence. The numbered steps below correspond
    411 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
    412 	 * Channel Engine manual (part of the PCIO manual).
    413 	 * See also the STP2002-STQ document from Sun Microsystems.
    414 	 */
    415 
    416 	/* step 1 & 2. Reset the Ethernet Channel */
    417 	hme_stop(sc);
    418 
    419 	/* Call MI reset function if any */
    420 	if (sc->sc_hwreset)
    421 		(*sc->sc_hwreset)(sc);
    422 
    423 #if 0
    424 	/* Mask all MIF interrupts, just in case */
    425 	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
    426 #endif
    427 
    428 	/* step 3. Setup data structures in host memory */
    429 	hme_meminit(sc);
    430 
    431 	/* step 4. TX MAC registers & counters */
    432 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    433 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    434 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    435 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    436 
    437 	/* Load station MAC address */
    438 	ea = sc->sc_enaddr;
    439 	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
    440 	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
    441 	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
    442 
    443 	/*
    444 	 * Init seed for backoff
    445 	 * (source suggested by manual: low 10 bits of MAC address)
    446 	 */
    447 	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
    448 	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
    449 
    450 
    451 	/* Note: Accepting power-on default for other MAC registers here.. */
    452 
    453 
    454 	/* step 5. RX MAC registers & counters */
    455 	hme_setladrf(sc);
    456 
    457 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
    458 	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
    459 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
    460 
    461 	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
    462 
    463 
    464 	/* step 8. Global Configuration & Interrupt Mask */
    465 	bus_space_write_4(t, seb, HME_SEBI_IMASK,
    466 			  HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_TXPERR |
    467 			  HME_SEB_STAT_GOTFRAME  | HME_SEB_STAT_RCNTEXP);
    468 
    469 	switch (sc->sc_burst) {
    470 	default:
    471 		v = 0;
    472 		break;
    473 	case 16:
    474 		v = HME_SEB_CFG_BURST16;
    475 		break;
    476 	case 32:
    477 		v = HME_SEB_CFG_BURST32;
    478 		break;
    479 	case 64:
    480 		v = HME_SEB_CFG_BURST64;
    481 		break;
    482 	}
    483 	bus_space_write_4(t, seb, HME_SEBI_CFG, v);
    484 
    485 	/* step 9. ETX Configuration: use mostly default values */
    486 
    487 	/* Enable DMA */
     488 	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
     489 	v |= HME_ETX_CFG_DMAENABLE;
     490 	bus_space_write_4(t, etx, HME_ETXI_CFG, v);
    491 
    492 	/* Descriptor ring size: in increments of 16 */
     493 	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16);
    494 
    495 
    496 	/* step 10. ERX Configuration: use default values; enable DMA */
     497 	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
     498 	v |= HME_ERX_CFG_DMAENABLE;
     499 	bus_space_write_4(t, erx, HME_ERXI_CFG, v);
    500 
    501 	/* step 11. XIF Configuration */
    502 	v = bus_space_read_4(t, mac, HME_MACI_XIF);
    503 	v |= HME_MAC_XIF_OE;
    504 	bus_space_write_4(t, mac, HME_MACI_XIF, v);
    505 
    506 	/* step 12. RX_MAC Configuration Register */
    507 	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
    508 	v |= HME_MAC_RXCFG_ENABLE;
    509 	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
    510 
    511 	/* step 13. TX_MAC Configuration Register */
    512 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
    513 	v |= HME_MAC_TXCFG_ENABLE;
    514 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
    515 
    516 	/* step 14. Issue Transmit Pending command */
    517 
    518 	/*
    519 	 * Put MIF in frame mode
    520 	 * XXX - do bit-bang mode later
    521 	 */
    522 	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
    523 	v &= ~HME_MIF_CFG_BBMODE;
    524 	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
    525 
    526 	/* Call MI initialization function if any */
    527 	if (sc->sc_hwinit)
    528 		(*sc->sc_hwinit)(sc);
    529 
    530 	ifp->if_flags |= IFF_RUNNING;
    531 	ifp->if_flags &= ~IFF_OACTIVE;
    532 	ifp->if_timer = 0;
    533 	hme_start(ifp);
    534 }
    535 
    536 /*
     537  * Compare two Ether/802 addresses, inlined and unrolled for speed.
     538  * Use like bcmp(): returns non-zero if the addresses differ.
    539  */
    540 static __inline__ int
    541 ether_cmp(a, b)
    542 	u_char *a, *b;
    543 {
    544 
    545 	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
    546 	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
     547 		return (1);
     548 	return (0);
    549 }
    550 
    551 
    552 /*
    553  * Routine to copy from mbuf chain to transmit buffer in
    554  * network buffer memory.
    555  * Returns the amount of data copied.
    556  */
    557 int
    558 hme_put(sc, ri, m)
    559 	struct hme_softc *sc;
    560 	int ri;			/* Ring index */
    561 	struct mbuf *m;
    562 {
    563 	struct mbuf *n;
    564 	int len, tlen = 0;
    565 	caddr_t bp;
    566 
    567 	bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
    568 	for (; m; m = n) {
    569 		len = m->m_len;
    570 		if (len == 0) {
    571 			MFREE(m, n);
    572 			continue;
    573 		}
    574 		bcopy(mtod(m, caddr_t), bp, len);
    575 		bp += len;
    576 		tlen += len;
    577 		MFREE(m, n);
    578 	}
    579 	return (tlen);
    580 }
    581 
    582 /*
    583  * Pull data off an interface.
    584  * Len is length of data, with local net header stripped.
    585  * We copy the data into mbufs.  When full cluster sized units are present
    586  * we copy into clusters.
    587  */
    588 struct mbuf *
    589 hme_get(sc, ri, totlen)
    590 	struct hme_softc *sc;
    591 	int ri, totlen;
    592 {
    593 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    594 	struct mbuf *m, *m0, *newm;
    595 	caddr_t bp;
    596 	int len;
    597 
    598 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
    599 	if (m0 == 0)
    600 		return (0);
    601 	m0->m_pkthdr.rcvif = ifp;
    602 	m0->m_pkthdr.len = totlen;
    603 	len = MHLEN;
    604 	m = m0;
    605 
    606 	bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
    607 
    608 	while (totlen > 0) {
    609 		if (totlen >= MINCLSIZE) {
    610 			MCLGET(m, M_DONTWAIT);
    611 			if ((m->m_flags & M_EXT) == 0)
    612 				goto bad;
    613 			len = MCLBYTES;
    614 		}
    615 
    616 		if (m == m0) {
    617 			caddr_t newdata = (caddr_t)
    618 			    ALIGN(m->m_data + sizeof(struct ether_header)) -
    619 			    sizeof(struct ether_header);
    620 			len -= newdata - m->m_data;
    621 			m->m_data = newdata;
    622 		}
    623 
    624 		m->m_len = len = min(totlen, len);
    625 		bcopy(bp, mtod(m, caddr_t), len);
    626 		bp += len;
    627 
    628 		totlen -= len;
    629 		if (totlen > 0) {
    630 			MGET(newm, M_DONTWAIT, MT_DATA);
    631 			if (newm == 0)
    632 				goto bad;
    633 			len = MLEN;
    634 			m = m->m_next = newm;
    635 		}
    636 	}
    637 
    638 	return (m0);
    639 
    640 bad:
    641 	m_freem(m0);
    642 	return (0);
    643 }
    644 
    645 /*
    646  * Pass a packet to the higher levels.
    647  */
    648 void
    649 hme_read(sc, ix, len)
    650 	struct hme_softc *sc;
    651 	int ix, len;
    652 {
    653 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    654 	struct mbuf *m;
    655 	struct ether_header *eh;
    656 
    657 	if (len <= sizeof(struct ether_header) ||
    658 	    len > ETHERMTU + sizeof(struct ether_header)) {
    659 #ifdef HMEDEBUG
    660 		printf("%s: invalid packet size %d; dropping\n",
    661 		    sc->sc_dev.dv_xname, len);
    662 #endif
    663 		ifp->if_ierrors++;
    664 		return;
    665 	}
    666 
    667 	/* Pull packet off interface. */
    668 	m = hme_get(sc, ix, len);
    669 	if (m == 0) {
    670 		ifp->if_ierrors++;
    671 		return;
    672 	}
    673 
    674 	ifp->if_ipackets++;
    675 
    676 	/* We assume that the header fit entirely in one mbuf. */
    677 	eh = mtod(m, struct ether_header *);
    678 
    679 #if NBPFILTER > 0
    680 	/*
    681 	 * Check if there's a BPF listener on this interface.
    682 	 * If so, hand off the raw packet to BPF.
    683 	 */
    684 	if (ifp->if_bpf) {
    685 		bpf_mtap(ifp->if_bpf, m);
    686 
    687 		/*
    688 		 * Note that the interface cannot be in promiscuous mode if
    689 		 * there are no BPF listeners.  And if we are in promiscuous
    690 		 * mode, we have to check if this packet is really ours.
    691 		 */
    692 		if ((ifp->if_flags & IFF_PROMISC) != 0 &&
    693 		    (eh->ether_dhost[0] & 1) == 0 && /* !mcast and !bcast */
    694 		    ether_cmp(eh->ether_dhost, sc->sc_enaddr)) {
    695 			m_freem(m);
    696 			return;
    697 		}
    698 	}
    699 #endif
    700 
    701 	/* Pass the packet up. */
    702 	(*ifp->if_input)(ifp, m);
    703 }
    704 
    705 void
    706 hme_start(ifp)
    707 	struct ifnet *ifp;
    708 {
    709 	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
    710 	caddr_t txd = sc->sc_rb.rb_txd;
    711 	struct mbuf *m;
    712 	unsigned int ri, len;
    713 	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;
    714 
    715 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
    716 		return;
    717 
    718 	ri = sc->sc_rb.rb_tdhead;
    719 
    720 	for (;;) {
    721 		IF_DEQUEUE(&ifp->if_snd, m);
    722 		if (m == 0)
    723 			break;
    724 
    725 #if NBPFILTER > 0
    726 		/*
    727 		 * If BPF is listening on this interface, let it see the
    728 		 * packet before we commit it to the wire.
    729 		 */
    730 		if (ifp->if_bpf)
    731 			bpf_mtap(ifp->if_bpf, m);
    732 #endif
    733 
    734 		/*
    735 		 * Copy the mbuf chain into the transmit buffer.
    736 		 */
    737 		len = hme_put(sc, ri, m);
    738 
    739 		/*
    740 		 * Initialize transmit registers and start transmission
    741 		 */
    742 		HME_XD_SETFLAGS(txd, ri,
    743 			HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
    744 			HME_XD_ENCODE_TSIZE(len));
    745 
    746 		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
    747 				  HME_ETX_TP_DMAWAKEUP);
    748 
    749 		if (++ri == ntbuf)
    750 			ri = 0;
    751 
    752 		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
    753 			ifp->if_flags |= IFF_OACTIVE;
    754 			break;
    755 		}
    756 	}
    757 
    758 	sc->sc_rb.rb_tdhead = ri;
    759 }
    760 
    761 /*
    762  * Transmit interrupt.
    763  */
    764 int
    765 hme_tint(sc)
    766 	struct hme_softc *sc;
    767 {
    768 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
    769 	bus_space_tag_t t = sc->sc_bustag;
    770 	bus_space_handle_t mac = sc->sc_mac;
    771 	unsigned int ri, txflags;
    772 
    773 	/*
    774 	 * Unload collision counters
    775 	 */
    776 	ifp->if_collisions +=
    777 		bus_space_read_4(t, mac, HME_MACI_NCCNT) +
    778 		bus_space_read_4(t, mac, HME_MACI_FCCNT) +
    779 		bus_space_read_4(t, mac, HME_MACI_EXCNT) +
    780 		bus_space_read_4(t, mac, HME_MACI_LTCNT);
    781 
    782 	/*
    783 	 * then clear the hardware counters.
    784 	 */
    785 	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
    786 	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
    787 	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
    788 	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
    789 
    790 	/* Fetch current position in the transmit ring */
    791 	ri = sc->sc_rb.rb_tdtail;
    792 
    793 	for (;;) {
    794 		if (sc->sc_rb.rb_td_nbusy <= 0)
    795 			break;
    796 
    797 		txflags = HME_XD_GETFLAGS(sc->sc_rb.rb_txd, ri);
    798 
    799 		if (txflags & HME_XD_OWN)
    800 			break;
    801 
    802 		ifp->if_flags &= ~IFF_OACTIVE;
    803 		ifp->if_opackets++;
    804 
     805 		if (++ri == sc->sc_rb.rb_ntbuf)
    806 			ri = 0;
    807 
    808 		--sc->sc_rb.rb_td_nbusy;
    809 	}
    810 
    811 	sc->sc_rb.rb_tdtail = ri;
    812 
    813 	hme_start(ifp);
    814 
    815 	if (sc->sc_rb.rb_td_nbusy == 0)
    816 		ifp->if_timer = 0;
    817 
    818 	return (1);
    819 }
    820 
    821 /*
    822  * Receive interrupt.
    823  */
    824 int
    825 hme_rint(sc)
    826 	struct hme_softc *sc;
    827 {
    828 	caddr_t xdr = sc->sc_rb.rb_rxd;
    829 	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
    830 	unsigned int ri, len;
    831 	u_int32_t flags;
    832 
    833 	ri = sc->sc_rb.rb_rdtail;
    834 
    835 	/*
    836 	 * Process all buffers with valid data.
    837 	 */
    838 	for (;;) {
    839 		flags = HME_XD_GETFLAGS(xdr, ri);
    840 		if (flags & HME_XD_OWN)
    841 			break;
    842 
    843 		len = HME_XD_DECODE_RSIZE(flags);
    844 		hme_read(sc, ri, len);
    845 
    846 		/* This buffer can be used by the hardware again */
    847 		HME_XD_SETFLAGS(xdr, ri,
    848 				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
    849 
    850 		if (++ri == nrbuf)
    851 			ri = 0;
    852 	}
    853 
    854 	sc->sc_rb.rb_rdtail = ri;
    855 
    856 	return (1);
    857 }
    858 
    859 int
    860 hme_eint(sc, status)
    861 	struct hme_softc *sc;
    862 	u_int status;
    863 {
    864 	char bits[128];
    865 
    866 	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
    867 		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
    868 		return (1);
    869 	}
    870 
    871 	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
    872 		bitmask_snprintf(status, HME_SEB_STAT_BITS, bits,sizeof(bits)));
    873 	return (1);
    874 }
    875 
    876 int
    877 hme_intr(v)
    878 	void *v;
    879 {
    880 	struct hme_softc *sc = (struct hme_softc *)v;
    881 	bus_space_tag_t t = sc->sc_bustag;
    882 	bus_space_handle_t seb = sc->sc_seb;
    883 	u_int32_t status;
    884 	int r = 0;
    885 
    886 	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
    887 
    888 	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
    889 		r |= hme_eint(sc, status);
    890 
    891 	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
    892 		r |= hme_tint(sc);
    893 
    894 	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
    895 		r |= hme_rint(sc);
    896 
    897 	return (r);
    898 }
    899 
    900 
    901 void
    902 hme_watchdog(ifp)
    903 	struct ifnet *ifp;
    904 {
    905 	struct hme_softc *sc = ifp->if_softc;
    906 
    907 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
    908 	++ifp->if_oerrors;
    909 
    910 	hme_reset(sc);
    911 }
    912 
    913 /*
    914  * MII interface
    915  */
    916 static int
    917 hme_mii_readreg(self, phy, reg)
    918 	struct device *self;
    919 	int phy, reg;
    920 {
    921 	struct hme_softc *sc = (void *)self;
    922 	bus_space_tag_t t = sc->sc_bustag;
    923 	bus_space_handle_t mif = sc->sc_mif;
    924 	int n;
    925 	u_int32_t v;
    926 
    927 	/* Construct the frame command */
    928 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
    929 	    HME_MIF_FO_TAMSB |
    930 	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
    931 	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
    932 	    (reg << HME_MIF_FO_REGAD_SHIFT);
    933 
    934 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
    935 	for (n = 0; n < 100; n++) {
    936 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
    937 		if (v & HME_MIF_FO_TALSB)
    938 			return (v & HME_MIF_FO_DATA);
    939 	}
    940 
    941 	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
    942 	return (0);
    943 }
    944 
    945 static void
    946 hme_mii_writereg(self, phy, reg, val)
    947 	struct device *self;
    948 	int phy, reg, val;
    949 {
    950 	struct hme_softc *sc = (void *)self;
    951 	bus_space_tag_t t = sc->sc_bustag;
    952 	bus_space_handle_t mif = sc->sc_mif;
    953 	int n;
    954 	u_int32_t v;
    955 
    956 	/* Construct the frame command */
    957 	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
    958 	    HME_MIF_FO_TAMSB				|
    959 	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
    960 	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
    961 	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
    962 	    (val & HME_MIF_FO_DATA);
    963 
    964 	bus_space_write_4(t, mif, HME_MIFI_FO, v);
    965 	for (n = 0; n < 100; n++) {
    966 		v = bus_space_read_4(t, mif, HME_MIFI_FO);
    967 		if (v & HME_MIF_FO_TALSB)
    968 			return;
    969 	}
    970 
     971 	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
    972 }
    973 
    974 static void
    975 hme_mii_statchg(dev)
    976 	struct device *dev;
    977 {
    978 }
    979 
    980 int
    981 hme_mediachange(ifp)
    982 	struct ifnet *ifp;
    983 {
    984 	struct hme_softc *sc = ifp->if_softc;
    985 	struct ifmedia *ifm = &sc->sc_media;
    986 	int newmedia = ifm->ifm_media;
    987 	bus_space_tag_t t = sc->sc_bustag;
    988 	bus_space_handle_t mac = sc->sc_mac;
    989 	u_int32_t v;
    990 	int error;
    991 
    992 	if (IFM_TYPE(newmedia) != IFM_ETHER)
    993 		return (EINVAL);
    994 
    995 	if ((ifp->if_flags & IFF_UP) == 0)
    996 		return (0);
    997 
    998 	if ((error = mii_mediachg(&sc->sc_mii)) != 0)
    999 		return (error);
   1000 
   1001 	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
   1002 	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
   1003 		v |= HME_MAC_TXCFG_FULLDPLX;
   1004 	else
   1005 		v &= ~HME_MAC_TXCFG_FULLDPLX;
   1006 	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
   1007 
   1008 	return (0);
   1009 }
   1010 
   1011 void
   1012 hme_mediastatus(ifp, ifmr)
   1013 	struct ifnet *ifp;
   1014 	struct ifmediareq *ifmr;
   1015 {
   1016 	struct hme_softc *sc = ifp->if_softc;
   1017 
   1018 	if ((ifp->if_flags & IFF_UP) == 0)
   1019 		return;
   1020 
   1021 	mii_pollstat(&sc->sc_mii);
   1022 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
   1023 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
   1024 }
   1025 
   1026 /*
   1027  * Process an ioctl request.
   1028  */
   1029 int
   1030 hme_ioctl(ifp, cmd, data)
   1031 	struct ifnet *ifp;
   1032 	u_long cmd;
   1033 	caddr_t data;
   1034 {
   1035 	struct hme_softc *sc = ifp->if_softc;
   1036 	struct ifaddr *ifa = (struct ifaddr *)data;
   1037 	struct ifreq *ifr = (struct ifreq *)data;
   1038 	int s, error = 0;
   1039 
   1040 	s = splnet();
   1041 
   1042 	switch (cmd) {
   1043 
   1044 	case SIOCSIFADDR:
   1045 		ifp->if_flags |= IFF_UP;
   1046 
   1047 		switch (ifa->ifa_addr->sa_family) {
   1048 #ifdef INET
   1049 		case AF_INET:
   1050 			hme_init(sc);
   1051 			arp_ifinit(ifp, ifa);
   1052 			break;
   1053 #endif
   1054 #ifdef NS
   1055 		case AF_NS:
   1056 		    {
   1057 			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
   1058 
   1059 			if (ns_nullhost(*ina))
   1060 				ina->x_host =
   1061 				    *(union ns_host *)LLADDR(ifp->if_sadl);
   1062 			else {
   1063 				bcopy(ina->x_host.c_host,
   1064 				    LLADDR(ifp->if_sadl),
   1065 				    sizeof(sc->sc_enaddr));
   1066 			}
   1067 			/* Set new address. */
   1068 			hme_init(sc);
   1069 			break;
   1070 		    }
   1071 #endif
   1072 		default:
   1073 			hme_init(sc);
   1074 			break;
   1075 		}
   1076 		break;
   1077 
   1078 	case SIOCSIFFLAGS:
   1079 		if ((ifp->if_flags & IFF_UP) == 0 &&
   1080 		    (ifp->if_flags & IFF_RUNNING) != 0) {
   1081 			/*
   1082 			 * If interface is marked down and it is running, then
   1083 			 * stop it.
   1084 			 */
   1085 			hme_stop(sc);
   1086 			ifp->if_flags &= ~IFF_RUNNING;
   1087 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
   1088 		    	   (ifp->if_flags & IFF_RUNNING) == 0) {
   1089 			/*
   1090 			 * If interface is marked up and it is stopped, then
   1091 			 * start it.
   1092 			 */
   1093 			hme_init(sc);
   1094 		} else if ((ifp->if_flags & IFF_UP) != 0) {
   1095 			/*
   1096 			 * Reset the interface to pick up changes in any other
   1097 			 * flags that affect hardware registers.
   1098 			 */
   1099 			/*hme_stop(sc);*/
   1100 			hme_init(sc);
   1101 		}
   1102 #ifdef HMEDEBUG
   1103 		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
   1104 #endif
   1105 		break;
   1106 
   1107 	case SIOCADDMULTI:
   1108 	case SIOCDELMULTI:
   1109 		error = (cmd == SIOCADDMULTI) ?
   1110 		    ether_addmulti(ifr, &sc->sc_ethercom) :
   1111 		    ether_delmulti(ifr, &sc->sc_ethercom);
   1112 
   1113 		if (error == ENETRESET) {
   1114 			/*
   1115 			 * Multicast list has changed; set the hardware filter
   1116 			 * accordingly.
   1117 			 */
   1118 			hme_setladrf(sc);
   1119 			error = 0;
   1120 		}
   1121 		break;
   1122 
   1123 	case SIOCGIFMEDIA:
   1124 	case SIOCSIFMEDIA:
   1125 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
   1126 		break;
   1127 
   1128 	default:
   1129 		error = EINVAL;
   1130 		break;
   1131 	}
   1132 
   1133 	splx(s);
   1134 	return (error);
   1135 }
   1136 
   1137 void
   1138 hme_shutdown(arg)
   1139 	void *arg;
   1140 {
   1141 
   1142 	hme_stop((struct hme_softc *)arg);
   1143 }
   1144 
   1145 /*
   1146  * Set up the logical address filter.
   1147  */
   1148 void
   1149 hme_setladrf(sc)
   1150 	struct hme_softc *sc;
   1151 {
   1152 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1153 	struct ether_multi *enm;
   1154 	struct ether_multistep step;
   1155 	struct ethercom *ec = &sc->sc_ethercom;
   1156 	bus_space_tag_t t = sc->sc_bustag;
   1157 	bus_space_handle_t mac = sc->sc_mac;
   1158 	u_char *cp;
   1159 	u_int32_t crc;
   1160 	u_int32_t hash[4];
   1161 	int len;
   1162 
   1163 	/*
   1164 	 * Set up multicast address filter by passing all multicast addresses
   1165 	 * through a crc generator, and then using the high order 6 bits as an
    1166 	 * index into the 64 bit logical address filter.  The high order two
    1167 	 * bits select the word, while the rest of the bits select the bit
    1168 	 * within the word.
   1169 	 */
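         	/*
         	 * Illustrative example: a 6-bit hash value of 0x2b (binary
         	 * 101011) selects word hash[2] (top two bits, 10) and bit 11
         	 * within that word (low four bits, 1011).
         	 */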
   1170 
   1171 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
   1172 		u_int32_t v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
   1173 		v |= HME_MAC_RXCFG_PMISC;
   1174 		bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
   1175 		goto allmulti;
   1176 	}
   1177 
   1178 	/* Clear hash table */
   1179 	hash[3] = hash[2] = hash[1] = hash[0] = 0;
   1180 	ETHER_FIRST_MULTI(step, ec, enm);
   1181 	while (enm != NULL) {
   1182 		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
   1183 			/*
   1184 			 * We must listen to a range of multicast addresses.
   1185 			 * For now, just accept all multicasts, rather than
   1186 			 * trying to set only those filter bits needed to match
   1187 			 * the range.  (At this time, the only use of address
   1188 			 * ranges is for IP multicast routing, for which the
   1189 			 * range is big enough to require all bits set.)
   1190 			 */
   1191 			goto allmulti;
   1192 		}
   1193 
   1194 		cp = enm->enm_addrlo;
   1195 		crc = 0xffffffff;
   1196 		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
   1197 			int octet = *cp++;
   1198 			int i;
   1199 
   1200 #define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
   1201 			for (i = 0; i < 8; i++) {
   1202 				if ((crc & 1) ^ (octet & 1)) {
   1203 					crc >>= 1;
   1204 					crc ^= MC_POLY_LE;
   1205 				} else {
   1206 					crc >>= 1;
   1207 				}
   1208 				octet >>= 1;
   1209 			}
   1210 		}
   1211 		/* Just want the 6 most significant bits. */
   1212 		crc >>= 26;
   1213 
   1214 		/* Set the corresponding bit in the filter. */
   1215 		hash[crc >> 4] |= 1 << (crc & 0xf);
   1216 
   1217 		ETHER_NEXT_MULTI(step, enm);
   1218 	}
   1219 
   1220 	/* Now load the hash table onto the chip */
   1221 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
   1222 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
   1223 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
   1224 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
   1225 
   1226 	ifp->if_flags &= ~IFF_ALLMULTI;
   1227 	return;
   1228 
   1229 allmulti:
   1230 	ifp->if_flags |= IFF_ALLMULTI;
   1231 	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, 0xffff);
   1232 	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, 0xffff);
   1233 	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, 0xffff);
   1234 	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, 0xffff);
   1235 }
   1236 
   1237 /*
   1238  * Routines for accessing the transmit and receive buffers.
   1239  * The various CPU and adapter configurations supported by this
   1240  * driver require three different access methods for buffers
   1241  * and descriptors:
   1242  *	(1) contig (contiguous data; no padding),
   1243  *	(2) gap2 (two bytes of data followed by two bytes of padding),
   1244  *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
   1245  */
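         /*
          * Illustrative example (not code used by this driver): with the gap2
          * layout, the six data bytes 00 01 02 03 04 05 would occupy buffer
          * offsets 0-1, 4-5 and 8-9, the intervening two-byte gaps being
          * padding; gap16 follows the same pattern in 16-byte units.
          */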
   1246 
   1247 #if 0
   1248 /*
   1249  * contig: contiguous data with no padding.
   1250  *
   1251  * Buffers may have any alignment.
   1252  */
   1253 
   1254 void
   1255 hme_copytobuf_contig(sc, from, ri, len)
   1256 	struct hme_softc *sc;
   1257 	void *from;
   1258 	int ri, len;
   1259 {
   1260 	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);
   1261 
   1262 	/*
   1263 	 * Just call bcopy() to do the work.
   1264 	 */
   1265 	bcopy(from, buf, len);
   1266 }
   1267 
   1268 void
   1269 hme_copyfrombuf_contig(sc, to, boff, len)
   1270 	struct hme_softc *sc;
    1271 hme_copyfrombuf_contig(sc, to, ri, len)
    1272 	struct hme_softc *sc;
    1273 	void *to;
    1274 	int ri, len;
   1275 
   1276 	/*
   1277 	 * Just call bcopy() to do the work.
   1278 	 */
   1279 	bcopy(buf, to, len);
   1280 }
   1281 #endif
   1282