1 1.139 gson /* $NetBSD: gem.c,v 1.139 2025/04/27 16:03:16 gson Exp $ */ 2 1.1 eeh 3 1.1 eeh /* 4 1.31 heas * 5 1.1 eeh * Copyright (C) 2001 Eduardo Horvath. 6 1.68 jdc * Copyright (c) 2001-2003 Thomas Moestl 7 1.1 eeh * All rights reserved. 8 1.1 eeh * 9 1.1 eeh * 10 1.1 eeh * Redistribution and use in source and binary forms, with or without 11 1.1 eeh * modification, are permitted provided that the following conditions 12 1.1 eeh * are met: 13 1.1 eeh * 1. Redistributions of source code must retain the above copyright 14 1.1 eeh * notice, this list of conditions and the following disclaimer. 15 1.1 eeh * 2. Redistributions in binary form must reproduce the above copyright 16 1.1 eeh * notice, this list of conditions and the following disclaimer in the 17 1.1 eeh * documentation and/or other materials provided with the distribution. 18 1.31 heas * 19 1.1 eeh * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 20 1.1 eeh * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 1.1 eeh * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 1.1 eeh * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 23 1.1 eeh * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 1.1 eeh * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 1.1 eeh * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 1.1 eeh * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 1.1 eeh * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 1.1 eeh * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 1.1 eeh * SUCH DAMAGE. 
30 1.1 eeh * 31 1.1 eeh */ 32 1.1 eeh 33 1.1 eeh /* 34 1.68 jdc * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 35 1.68 jdc * See `GEM Gigabit Ethernet ASIC Specification' 36 1.139 gson * https://web.archive.org/web/20090701010806/http://www.sun.com/processors/manuals/ge.pdf 37 1.1 eeh */ 38 1.10 lukem 39 1.10 lukem #include <sys/cdefs.h> 40 1.139 gson __KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.139 2025/04/27 16:03:16 gson Exp $"); 41 1.1 eeh 42 1.35 heas #include "opt_inet.h" 43 1.1 eeh 44 1.1 eeh #include <sys/param.h> 45 1.31 heas #include <sys/systm.h> 46 1.1 eeh #include <sys/callout.h> 47 1.31 heas #include <sys/mbuf.h> 48 1.1 eeh #include <sys/syslog.h> 49 1.1 eeh #include <sys/kernel.h> 50 1.1 eeh #include <sys/socket.h> 51 1.1 eeh #include <sys/ioctl.h> 52 1.1 eeh #include <sys/errno.h> 53 1.1 eeh #include <sys/device.h> 54 1.1 eeh 55 1.1 eeh #include <machine/endian.h> 56 1.1 eeh 57 1.1 eeh #include <net/if.h> 58 1.1 eeh #include <net/if_dl.h> 59 1.1 eeh #include <net/if_media.h> 60 1.1 eeh #include <net/if_ether.h> 61 1.1 eeh 62 1.35 heas #ifdef INET 63 1.35 heas #include <netinet/in.h> 64 1.35 heas #include <netinet/in_systm.h> 65 1.35 heas #include <netinet/in_var.h> 66 1.35 heas #include <netinet/ip.h> 67 1.35 heas #include <netinet/tcp.h> 68 1.35 heas #include <netinet/udp.h> 69 1.35 heas #endif 70 1.35 heas 71 1.1 eeh #include <net/bpf.h> 72 1.1 eeh 73 1.60 ad #include <sys/bus.h> 74 1.60 ad #include <sys/intr.h> 75 1.1 eeh 76 1.1 eeh #include <dev/mii/mii.h> 77 1.1 eeh #include <dev/mii/miivar.h> 78 1.1 eeh #include <dev/mii/mii_bitbang.h> 79 1.1 eeh 80 1.1 eeh #include <dev/ic/gemreg.h> 81 1.1 eeh #include <dev/ic/gemvar.h> 82 1.1 eeh 83 1.1 eeh #define TRIES 10000 84 1.1 eeh 85 1.85 dyoung static void gem_inten(struct gem_softc *); 86 1.41 christos static void gem_start(struct ifnet *); 87 1.41 christos static void gem_stop(struct ifnet *, int); 88 1.53 christos int gem_ioctl(struct ifnet *, u_long, void *); 89 1.34 perry void 
gem_tick(void *); 90 1.34 perry void gem_watchdog(struct ifnet *); 91 1.99 jdc void gem_rx_watchdog(void *); 92 1.68 jdc void gem_pcs_start(struct gem_softc *sc); 93 1.68 jdc void gem_pcs_stop(struct gem_softc *sc, int); 94 1.34 perry int gem_init(struct ifnet *); 95 1.1 eeh void gem_init_regs(struct gem_softc *sc); 96 1.1 eeh static int gem_ringsize(int sz); 97 1.41 christos static int gem_meminit(struct gem_softc *); 98 1.34 perry void gem_mifinit(struct gem_softc *); 99 1.50 martin static int gem_bitwait(struct gem_softc *sc, bus_space_handle_t, int, 100 1.112 msaitoh uint32_t, uint32_t); 101 1.34 perry void gem_reset(struct gem_softc *); 102 1.1 eeh int gem_reset_rx(struct gem_softc *sc); 103 1.68 jdc static void gem_reset_rxdma(struct gem_softc *sc); 104 1.68 jdc static void gem_rx_common(struct gem_softc *sc); 105 1.1 eeh int gem_reset_tx(struct gem_softc *sc); 106 1.1 eeh int gem_disable_rx(struct gem_softc *sc); 107 1.1 eeh int gem_disable_tx(struct gem_softc *sc); 108 1.41 christos static void gem_rxdrain(struct gem_softc *sc); 109 1.1 eeh int gem_add_rxbuf(struct gem_softc *sc, int idx); 110 1.34 perry void gem_setladrf(struct gem_softc *); 111 1.1 eeh 112 1.1 eeh /* MII methods & callbacks */ 113 1.113 msaitoh static int gem_mii_readreg(device_t, int, int, uint16_t *); 114 1.113 msaitoh static int gem_mii_writereg(device_t, int, int, uint16_t); 115 1.100 matt static void gem_mii_statchg(struct ifnet *); 116 1.34 perry 117 1.79 dyoung static int gem_ifflags_cb(struct ethercom *); 118 1.79 dyoung 119 1.68 jdc void gem_statuschange(struct gem_softc *); 120 1.68 jdc 121 1.69 dyoung int gem_ser_mediachange(struct ifnet *); 122 1.69 dyoung void gem_ser_mediastatus(struct ifnet *, struct ifmediareq *); 123 1.34 perry 124 1.85 dyoung static void gem_partial_detach(struct gem_softc *, enum gem_attach_stage); 125 1.85 dyoung 126 1.34 perry struct mbuf *gem_get(struct gem_softc *, int, int); 127 1.34 perry int gem_put(struct gem_softc *, int, struct mbuf *); 128 
1.34 perry void gem_read(struct gem_softc *, int, int); 129 1.68 jdc int gem_pint(struct gem_softc *); 130 1.34 perry int gem_eint(struct gem_softc *, u_int); 131 1.34 perry int gem_rint(struct gem_softc *); 132 1.34 perry int gem_tint(struct gem_softc *); 133 1.34 perry void gem_power(int, void *); 134 1.1 eeh 135 1.1 eeh #ifdef GEM_DEBUG 136 1.67 dyoung static void gem_txsoft_print(const struct gem_softc *, int, int); 137 1.1 eeh #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 138 1.1 eeh printf x 139 1.1 eeh #else 140 1.1 eeh #define DPRINTF(sc, x) /* nothing */ 141 1.1 eeh #endif 142 1.1 eeh 143 1.40 bouyer #define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header)) 144 1.40 bouyer 145 1.85 dyoung int 146 1.85 dyoung gem_detach(struct gem_softc *sc, int flags) 147 1.85 dyoung { 148 1.88 martin int i; 149 1.85 dyoung struct ifnet *ifp = &sc->sc_ethercom.ec_if; 150 1.91 jdc bus_space_tag_t t = sc->sc_bustag; 151 1.91 jdc bus_space_handle_t h = sc->sc_h1; 152 1.85 dyoung 153 1.85 dyoung /* 154 1.85 dyoung * Free any resources we've allocated during the attach. 155 1.85 dyoung * Do this in reverse order and fall through. 
156 1.85 dyoung */ 157 1.85 dyoung switch (sc->sc_att_stage) { 158 1.85 dyoung case GEM_ATT_BACKEND_2: 159 1.85 dyoung case GEM_ATT_BACKEND_1: 160 1.85 dyoung case GEM_ATT_FINISHED: 161 1.91 jdc bus_space_write_4(t, h, GEM_INTMASK, ~(uint32_t)0); 162 1.85 dyoung gem_stop(&sc->sc_ethercom.ec_if, 1); 163 1.85 dyoung 164 1.85 dyoung #ifdef GEM_COUNTERS 165 1.85 dyoung for (i = __arraycount(sc->sc_ev_rxhist); --i >= 0; ) 166 1.85 dyoung evcnt_detach(&sc->sc_ev_rxhist[i]); 167 1.85 dyoung evcnt_detach(&sc->sc_ev_rxnobuf); 168 1.85 dyoung evcnt_detach(&sc->sc_ev_rxfull); 169 1.85 dyoung evcnt_detach(&sc->sc_ev_rxint); 170 1.85 dyoung evcnt_detach(&sc->sc_ev_txint); 171 1.133 jdc evcnt_detach(&sc->sc_ev_rxoverflow); 172 1.85 dyoung #endif 173 1.85 dyoung evcnt_detach(&sc->sc_ev_intr); 174 1.85 dyoung 175 1.85 dyoung rnd_detach_source(&sc->rnd_source); 176 1.85 dyoung ether_ifdetach(ifp); 177 1.85 dyoung if_detach(ifp); 178 1.86 martin 179 1.86 martin callout_destroy(&sc->sc_tick_ch); 180 1.99 jdc callout_destroy(&sc->sc_rx_watchdog); 181 1.86 martin 182 1.85 dyoung /*FALLTHROUGH*/ 183 1.85 dyoung case GEM_ATT_MII: 184 1.85 dyoung sc->sc_att_stage = GEM_ATT_MII; 185 1.88 martin mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 186 1.132 mrg ifmedia_fini(&sc->sc_mii.mii_media); 187 1.132 mrg 188 1.85 dyoung /*FALLTHROUGH*/ 189 1.85 dyoung case GEM_ATT_7: 190 1.85 dyoung for (i = 0; i < GEM_NRXDESC; i++) { 191 1.85 dyoung if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 192 1.85 dyoung bus_dmamap_destroy(sc->sc_dmatag, 193 1.85 dyoung sc->sc_rxsoft[i].rxs_dmamap); 194 1.85 dyoung } 195 1.85 dyoung /*FALLTHROUGH*/ 196 1.85 dyoung case GEM_ATT_6: 197 1.85 dyoung for (i = 0; i < GEM_TXQUEUELEN; i++) { 198 1.85 dyoung if (sc->sc_txsoft[i].txs_dmamap != NULL) 199 1.85 dyoung bus_dmamap_destroy(sc->sc_dmatag, 200 1.85 dyoung sc->sc_txsoft[i].txs_dmamap); 201 1.85 dyoung } 202 1.85 dyoung bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); 203 1.85 dyoung /*FALLTHROUGH*/ 204 1.85 dyoung 
case GEM_ATT_5: 205 1.86 martin bus_dmamap_unload(sc->sc_dmatag, sc->sc_nulldmamap); 206 1.85 dyoung /*FALLTHROUGH*/ 207 1.85 dyoung case GEM_ATT_4: 208 1.86 martin bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap); 209 1.85 dyoung /*FALLTHROUGH*/ 210 1.85 dyoung case GEM_ATT_3: 211 1.85 dyoung bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap); 212 1.85 dyoung /*FALLTHROUGH*/ 213 1.85 dyoung case GEM_ATT_2: 214 1.85 dyoung bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data, 215 1.85 dyoung sizeof(struct gem_control_data)); 216 1.85 dyoung /*FALLTHROUGH*/ 217 1.85 dyoung case GEM_ATT_1: 218 1.85 dyoung bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg); 219 1.85 dyoung /*FALLTHROUGH*/ 220 1.85 dyoung case GEM_ATT_0: 221 1.85 dyoung sc->sc_att_stage = GEM_ATT_0; 222 1.85 dyoung /*FALLTHROUGH*/ 223 1.85 dyoung case GEM_ATT_BACKEND_0: 224 1.85 dyoung break; 225 1.85 dyoung } 226 1.88 martin return 0; 227 1.85 dyoung } 228 1.85 dyoung 229 1.85 dyoung static void 230 1.85 dyoung gem_partial_detach(struct gem_softc *sc, enum gem_attach_stage stage) 231 1.85 dyoung { 232 1.85 dyoung cfattach_t ca = device_cfattach(sc->sc_dev); 233 1.85 dyoung 234 1.85 dyoung sc->sc_att_stage = stage; 235 1.85 dyoung (*ca->ca_detach)(sc->sc_dev, 0); 236 1.85 dyoung } 237 1.1 eeh 238 1.1 eeh /* 239 1.6 thorpej * gem_attach: 240 1.1 eeh * 241 1.1 eeh * Attach a Gem interface to the system. 242 1.1 eeh */ 243 1.1 eeh void 244 1.81 dsl gem_attach(struct gem_softc *sc, const uint8_t *enaddr) 245 1.1 eeh { 246 1.1 eeh struct ifnet *ifp = &sc->sc_ethercom.ec_if; 247 1.1 eeh struct mii_data *mii = &sc->sc_mii; 248 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 249 1.68 jdc bus_space_handle_t h = sc->sc_h1; 250 1.122 msaitoh struct ifmedia_entry *ife; 251 1.89 jdc int i, error, phyaddr; 252 1.112 msaitoh uint32_t v; 253 1.40 bouyer char *nullbuf; 254 1.1 eeh 255 1.1 eeh /* Make sure the chip is stopped. 
*/ 256 1.1 eeh ifp->if_softc = sc; 257 1.1 eeh gem_reset(sc); 258 1.1 eeh 259 1.1 eeh /* 260 1.1 eeh * Allocate the control data structures, and create and load the 261 1.40 bouyer * DMA map for it. gem_control_data is 9216 bytes, we have space for 262 1.40 bouyer * the padding buffer in the bus_dmamem_alloc()'d memory. 263 1.1 eeh */ 264 1.1 eeh if ((error = bus_dmamem_alloc(sc->sc_dmatag, 265 1.40 bouyer sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE, 266 1.40 bouyer 0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) { 267 1.85 dyoung aprint_error_dev(sc->sc_dev, 268 1.76 cegger "unable to allocate control data, error = %d\n", 269 1.76 cegger error); 270 1.85 dyoung gem_partial_detach(sc, GEM_ATT_0); 271 1.85 dyoung return; 272 1.1 eeh } 273 1.1 eeh 274 1.68 jdc /* XXX should map this in with correct endianness */ 275 1.1 eeh if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg, 276 1.53 christos sizeof(struct gem_control_data), (void **)&sc->sc_control_data, 277 1.1 eeh BUS_DMA_COHERENT)) != 0) { 278 1.85 dyoung aprint_error_dev(sc->sc_dev, 279 1.85 dyoung "unable to map control data, error = %d\n", error); 280 1.85 dyoung gem_partial_detach(sc, GEM_ATT_1); 281 1.85 dyoung return; 282 1.1 eeh } 283 1.1 eeh 284 1.40 bouyer nullbuf = 285 1.54 christos (char *)sc->sc_control_data + sizeof(struct gem_control_data); 286 1.40 bouyer 287 1.1 eeh if ((error = bus_dmamap_create(sc->sc_dmatag, 288 1.1 eeh sizeof(struct gem_control_data), 1, 289 1.1 eeh sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 290 1.85 dyoung aprint_error_dev(sc->sc_dev, 291 1.85 dyoung "unable to create control data DMA map, error = %d\n", 292 1.85 dyoung error); 293 1.85 dyoung gem_partial_detach(sc, GEM_ATT_2); 294 1.85 dyoung return; 295 1.1 eeh } 296 1.1 eeh 297 1.1 eeh if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap, 298 1.1 eeh sc->sc_control_data, sizeof(struct gem_control_data), NULL, 299 1.1 eeh 0)) != 0) { 300 1.85 dyoung 
aprint_error_dev(sc->sc_dev, 301 1.76 cegger "unable to load control data DMA map, error = %d\n", 302 1.76 cegger error); 303 1.85 dyoung gem_partial_detach(sc, GEM_ATT_3); 304 1.85 dyoung return; 305 1.1 eeh } 306 1.1 eeh 307 1.40 bouyer memset(nullbuf, 0, ETHER_MIN_TX); 308 1.40 bouyer if ((error = bus_dmamap_create(sc->sc_dmatag, 309 1.40 bouyer ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) { 310 1.85 dyoung aprint_error_dev(sc->sc_dev, 311 1.85 dyoung "unable to create padding DMA map, error = %d\n", error); 312 1.85 dyoung gem_partial_detach(sc, GEM_ATT_4); 313 1.85 dyoung return; 314 1.40 bouyer } 315 1.40 bouyer 316 1.40 bouyer if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap, 317 1.40 bouyer nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) { 318 1.85 dyoung aprint_error_dev(sc->sc_dev, 319 1.85 dyoung "unable to load padding DMA map, error = %d\n", error); 320 1.85 dyoung gem_partial_detach(sc, GEM_ATT_5); 321 1.85 dyoung return; 322 1.40 bouyer } 323 1.40 bouyer 324 1.40 bouyer bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX, 325 1.40 bouyer BUS_DMASYNC_PREWRITE); 326 1.40 bouyer 327 1.1 eeh /* 328 1.1 eeh * Initialize the transmit job descriptors. 329 1.1 eeh */ 330 1.1 eeh SIMPLEQ_INIT(&sc->sc_txfreeq); 331 1.1 eeh SIMPLEQ_INIT(&sc->sc_txdirtyq); 332 1.1 eeh 333 1.1 eeh /* 334 1.1 eeh * Create the transmit buffer DMA maps. 
335 1.1 eeh */ 336 1.1 eeh for (i = 0; i < GEM_TXQUEUELEN; i++) { 337 1.1 eeh struct gem_txsoft *txs; 338 1.1 eeh 339 1.1 eeh txs = &sc->sc_txsoft[i]; 340 1.1 eeh txs->txs_mbuf = NULL; 341 1.15 matt if ((error = bus_dmamap_create(sc->sc_dmatag, 342 1.15 matt ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS, 343 1.15 matt ETHER_MAX_LEN_JUMBO, 0, 0, 344 1.1 eeh &txs->txs_dmamap)) != 0) { 345 1.85 dyoung aprint_error_dev(sc->sc_dev, 346 1.85 dyoung "unable to create tx DMA map %d, error = %d\n", 347 1.85 dyoung i, error); 348 1.85 dyoung gem_partial_detach(sc, GEM_ATT_6); 349 1.85 dyoung return; 350 1.1 eeh } 351 1.1 eeh SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 352 1.1 eeh } 353 1.1 eeh 354 1.1 eeh /* 355 1.1 eeh * Create the receive buffer DMA maps. 356 1.1 eeh */ 357 1.1 eeh for (i = 0; i < GEM_NRXDESC; i++) { 358 1.1 eeh if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, 359 1.1 eeh MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 360 1.85 dyoung aprint_error_dev(sc->sc_dev, 361 1.85 dyoung "unable to create rx DMA map %d, error = %d\n", 362 1.85 dyoung i, error); 363 1.85 dyoung gem_partial_detach(sc, GEM_ATT_7); 364 1.85 dyoung return; 365 1.1 eeh } 366 1.1 eeh sc->sc_rxsoft[i].rxs_mbuf = NULL; 367 1.1 eeh } 368 1.1 eeh 369 1.68 jdc /* Initialize ifmedia structures and MII info */ 370 1.68 jdc mii->mii_ifp = ifp; 371 1.68 jdc mii->mii_readreg = gem_mii_readreg; 372 1.68 jdc mii->mii_writereg = gem_mii_writereg; 373 1.68 jdc mii->mii_statchg = gem_mii_statchg; 374 1.68 jdc 375 1.69 dyoung sc->sc_ethercom.ec_mii = mii; 376 1.68 jdc 377 1.68 jdc /* 378 1.68 jdc * Initialization based on `GEM Gigabit Ethernet ASIC Specification' 379 1.68 jdc * Section 3.2.1 `Initialization Sequence'. 380 1.68 jdc * However, we can't assume SERDES or Serialink if neither 381 1.68 jdc * GEM_MIF_CONFIG_MDI0 nor GEM_MIF_CONFIG_MDI1 are set 382 1.68 jdc * being set, as both are set on Sun X1141A (with SERDES). 
So, 383 1.68 jdc * we rely on our bus attachment setting GEM_SERDES or GEM_SERIAL. 384 1.89 jdc * Also, for variants that report 2 PHY's, we prefer the external 385 1.89 jdc * PHY over the internal PHY, so we look for that first. 386 1.68 jdc */ 387 1.68 jdc gem_mifinit(sc); 388 1.68 jdc 389 1.68 jdc if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) { 390 1.69 dyoung ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange, 391 1.69 dyoung ether_mediastatus); 392 1.89 jdc /* Look for external PHY */ 393 1.89 jdc if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 394 1.89 jdc sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; 395 1.89 jdc bus_space_write_4(t, h, GEM_MIF_CONFIG, 396 1.89 jdc sc->sc_mif_config); 397 1.89 jdc switch (sc->sc_variant) { 398 1.89 jdc case GEM_SUN_ERI: 399 1.89 jdc phyaddr = GEM_PHYAD_EXTERNAL; 400 1.89 jdc break; 401 1.89 jdc default: 402 1.89 jdc phyaddr = MII_PHY_ANY; 403 1.89 jdc break; 404 1.89 jdc } 405 1.89 jdc mii_attach(sc->sc_dev, mii, 0xffffffff, phyaddr, 406 1.89 jdc MII_OFFSET_ANY, MIIF_FORCEANEG); 407 1.89 jdc } 408 1.89 jdc #ifdef GEM_DEBUG 409 1.89 jdc else 410 1.89 jdc aprint_debug_dev(sc->sc_dev, "using external PHY\n"); 411 1.89 jdc #endif 412 1.89 jdc /* Look for internal PHY if no external PHY was found */ 413 1.117 msaitoh if (LIST_EMPTY(&mii->mii_phys) && 414 1.109 macallan ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI0) || 415 1.109 macallan (sc->sc_variant == GEM_APPLE_K2_GMAC))) { 416 1.89 jdc sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; 417 1.89 jdc bus_space_write_4(t, h, GEM_MIF_CONFIG, 418 1.89 jdc sc->sc_mif_config); 419 1.89 jdc switch (sc->sc_variant) { 420 1.89 jdc case GEM_SUN_ERI: 421 1.89 jdc case GEM_APPLE_K2_GMAC: 422 1.89 jdc phyaddr = GEM_PHYAD_INTERNAL; 423 1.89 jdc break; 424 1.89 jdc case GEM_APPLE_GMAC: 425 1.89 jdc phyaddr = GEM_PHYAD_EXTERNAL; 426 1.89 jdc break; 427 1.89 jdc default: 428 1.89 jdc phyaddr = MII_PHY_ANY; 429 1.89 jdc break; 430 1.89 jdc } 431 1.89 jdc mii_attach(sc->sc_dev, mii, 
0xffffffff, phyaddr, 432 1.89 jdc MII_OFFSET_ANY, MIIF_FORCEANEG); 433 1.89 jdc #ifdef GEM_DEBUG 434 1.89 jdc if (!LIST_EMPTY(&mii->mii_phys)) 435 1.89 jdc aprint_debug_dev(sc->sc_dev, 436 1.89 jdc "using internal PHY\n"); 437 1.89 jdc #endif 438 1.89 jdc } 439 1.69 dyoung if (LIST_EMPTY(&mii->mii_phys)) { 440 1.68 jdc /* No PHY attached */ 441 1.85 dyoung aprint_error_dev(sc->sc_dev, 442 1.85 dyoung "PHY probe failed\n"); 443 1.85 dyoung gem_partial_detach(sc, GEM_ATT_MII); 444 1.85 dyoung return; 445 1.68 jdc } else { 446 1.69 dyoung struct mii_softc *child; 447 1.69 dyoung 448 1.68 jdc /* 449 1.68 jdc * Walk along the list of attached MII devices and 450 1.68 jdc * establish an `MII instance' to `PHY number' 451 1.68 jdc * mapping. 452 1.68 jdc */ 453 1.69 dyoung LIST_FOREACH(child, &mii->mii_phys, mii_list) { 454 1.68 jdc /* 455 1.68 jdc * Note: we support just one PHY: the internal 456 1.68 jdc * or external MII is already selected for us 457 1.68 jdc * by the GEM_MIF_CONFIG register. 458 1.68 jdc */ 459 1.68 jdc if (child->mii_phy > 1 || child->mii_inst > 0) { 460 1.85 dyoung aprint_error_dev(sc->sc_dev, 461 1.76 cegger "cannot accommodate MII device" 462 1.68 jdc " %s at PHY %d, instance %d\n", 463 1.77 xtraeme device_xname(child->mii_dev), 464 1.68 jdc child->mii_phy, child->mii_inst); 465 1.68 jdc continue; 466 1.68 jdc } 467 1.68 jdc sc->sc_phys[child->mii_inst] = child->mii_phy; 468 1.68 jdc } 469 1.68 jdc 470 1.68 jdc if (sc->sc_variant != GEM_SUN_ERI) 471 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 472 1.68 jdc GEM_MII_DATAPATH_MII); 473 1.68 jdc 474 1.68 jdc /* 475 1.68 jdc * XXX - we can really do the following ONLY if the 476 1.68 jdc * PHY indeed has the auto negotiation capability!! 
477 1.68 jdc */ 478 1.117 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 479 1.68 jdc } 480 1.68 jdc } else { 481 1.69 dyoung ifmedia_init(&mii->mii_media, IFM_IMASK, gem_ser_mediachange, 482 1.69 dyoung gem_ser_mediastatus); 483 1.68 jdc /* SERDES or Serialink */ 484 1.68 jdc if (sc->sc_flags & GEM_SERDES) { 485 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 486 1.68 jdc GEM_MII_DATAPATH_SERDES); 487 1.68 jdc } else { 488 1.68 jdc sc->sc_flags |= GEM_SERIAL; 489 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 490 1.68 jdc GEM_MII_DATAPATH_SERIAL); 491 1.68 jdc } 492 1.68 jdc 493 1.85 dyoung aprint_normal_dev(sc->sc_dev, "using external PCS %s: ", 494 1.68 jdc sc->sc_flags & GEM_SERDES ? "SERDES" : "Serialink"); 495 1.68 jdc 496 1.117 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_AUTO, 0, NULL); 497 1.68 jdc /* Check for FDX and HDX capabilities */ 498 1.68 jdc sc->sc_mii_anar = bus_space_read_4(t, h, GEM_MII_ANAR); 499 1.68 jdc if (sc->sc_mii_anar & GEM_MII_ANEG_FUL_DUPLX) { 500 1.117 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | 501 1.117 msaitoh IFM_1000_SX | IFM_MANUAL | IFM_FDX, 0, NULL); 502 1.68 jdc aprint_normal("1000baseSX-FDX, "); 503 1.68 jdc } 504 1.68 jdc if (sc->sc_mii_anar & GEM_MII_ANEG_HLF_DUPLX) { 505 1.117 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | 506 1.117 msaitoh IFM_1000_SX | IFM_MANUAL | IFM_HDX, 0, NULL); 507 1.68 jdc aprint_normal("1000baseSX-HDX, "); 508 1.68 jdc } 509 1.117 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 510 1.68 jdc sc->sc_mii_media = IFM_AUTO; 511 1.68 jdc aprint_normal("auto\n"); 512 1.68 jdc 513 1.68 jdc gem_pcs_stop(sc, 1); 514 1.68 jdc } 515 1.68 jdc 516 1.1 eeh /* 517 1.1 eeh * From this point forward, the attachment cannot fail. A failure 518 1.1 eeh * before this point releases all resources that may have been 519 1.1 eeh * allocated. 520 1.1 eeh */ 521 1.1 eeh 522 1.1 eeh /* Announce ourselves. 
*/ 523 1.85 dyoung aprint_normal_dev(sc->sc_dev, "Ethernet address %s", 524 1.6 thorpej ether_sprintf(enaddr)); 525 1.1 eeh 526 1.15 matt /* Get RX FIFO size */ 527 1.15 matt sc->sc_rxfifosize = 64 * 528 1.68 jdc bus_space_read_4(t, h, GEM_RX_FIFO_SIZE); 529 1.24 thorpej aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024); 530 1.15 matt 531 1.15 matt /* Get TX FIFO size */ 532 1.68 jdc v = bus_space_read_4(t, h, GEM_TX_FIFO_SIZE); 533 1.24 thorpej aprint_normal(", %uKB TX fifo\n", v / 16); 534 1.15 matt 535 1.1 eeh /* Initialize ifnet structure. */ 536 1.85 dyoung strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 537 1.1 eeh ifp->if_softc = sc; 538 1.114 msaitoh ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 539 1.41 christos sc->sc_if_flags = ifp->if_flags; 540 1.85 dyoung #if 0 541 1.73 jdc /* 542 1.73 jdc * The GEM hardware supports basic TCP checksum offloading only. 543 1.73 jdc * Several (all?) revisions (Sun rev. 01 and Apple rev. 00 and 80) 544 1.73 jdc * have bugs in the receive checksum, so don't enable it for now. 545 1.85 dyoung */ 546 1.73 jdc if ((GEM_IS_SUN(sc) && sc->sc_chiprev != 1) || 547 1.73 jdc (GEM_IS_APPLE(sc) && 548 1.73 jdc (sc->sc_chiprev != 0 && sc->sc_chiprev != 0x80))) 549 1.73 jdc ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx; 550 1.85 dyoung #endif 551 1.73 jdc ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx; 552 1.1 eeh ifp->if_start = gem_start; 553 1.1 eeh ifp->if_ioctl = gem_ioctl; 554 1.1 eeh ifp->if_watchdog = gem_watchdog; 555 1.1 eeh ifp->if_stop = gem_stop; 556 1.1 eeh ifp->if_init = gem_init; 557 1.1 eeh IFQ_SET_READY(&ifp->if_snd); 558 1.1 eeh 559 1.15 matt /* 560 1.15 matt * If we support GigE media, we support jumbo frames too. 561 1.15 matt * Unless we are Apple. 
562 1.15 matt */ 563 1.122 msaitoh TAILQ_FOREACH(ife, &mii->mii_media.ifm_list, ifm_list) { 564 1.122 msaitoh if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T || 565 1.122 msaitoh IFM_SUBTYPE(ife->ifm_media) == IFM_1000_SX || 566 1.122 msaitoh IFM_SUBTYPE(ife->ifm_media) == IFM_1000_LX || 567 1.122 msaitoh IFM_SUBTYPE(ife->ifm_media) == IFM_1000_CX) { 568 1.70 jdc if (!GEM_IS_APPLE(sc)) 569 1.15 matt sc->sc_ethercom.ec_capabilities 570 1.15 matt |= ETHERCAP_JUMBO_MTU; 571 1.15 matt sc->sc_flags |= GEM_GIGABIT; 572 1.15 matt break; 573 1.15 matt } 574 1.15 matt } 575 1.15 matt 576 1.1 eeh /* claim 802.1q capability */ 577 1.1 eeh sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 578 1.1 eeh 579 1.1 eeh /* Attach the interface. */ 580 1.1 eeh if_attach(ifp); 581 1.108 ozaki if_deferred_start_init(ifp, NULL); 582 1.6 thorpej ether_ifattach(ifp, enaddr); 583 1.79 dyoung ether_set_ifflags_cb(&sc->sc_ethercom, gem_ifflags_cb); 584 1.1 eeh 585 1.85 dyoung rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 586 1.102 tls RND_TYPE_NET, RND_FLAG_DEFAULT); 587 1.1 eeh 588 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 589 1.85 dyoung NULL, device_xname(sc->sc_dev), "interrupts"); 590 1.19 matt #ifdef GEM_COUNTERS 591 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR, 592 1.85 dyoung &sc->sc_ev_intr, device_xname(sc->sc_dev), "tx interrupts"); 593 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR, 594 1.85 dyoung &sc->sc_ev_intr, device_xname(sc->sc_dev), "rx interrupts"); 595 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR, 596 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx ring full"); 597 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR, 598 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx malloc failure"); 599 1.133 jdc evcnt_attach_dynamic(&sc->sc_ev_rxoverflow, EVCNT_TYPE_INTR, 600 1.133 jdc &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx overflow"); 601 
1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR, 602 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 0desc"); 603 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR, 604 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 1desc"); 605 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR, 606 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 2desc"); 607 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR, 608 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx 3desc"); 609 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR, 610 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >3desc"); 611 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR, 612 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >7desc"); 613 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR, 614 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >15desc"); 615 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR, 616 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >31desc"); 617 1.18 matt evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR, 618 1.85 dyoung &sc->sc_ev_rxint, device_xname(sc->sc_dev), "rx >63desc"); 619 1.19 matt #endif 620 1.1 eeh 621 1.85 dyoung callout_init(&sc->sc_tick_ch, 0); 622 1.128 thorpej callout_setfunc(&sc->sc_tick_ch, gem_tick, sc); 623 1.128 thorpej 624 1.99 jdc callout_init(&sc->sc_rx_watchdog, 0); 625 1.99 jdc callout_setfunc(&sc->sc_rx_watchdog, gem_rx_watchdog, sc); 626 1.1 eeh 627 1.85 dyoung sc->sc_att_stage = GEM_ATT_FINISHED; 628 1.1 eeh 629 1.1 eeh return; 630 1.1 eeh } 631 1.1 eeh 632 1.1 eeh void 633 1.81 dsl gem_tick(void *arg) 634 1.1 eeh { 635 1.1 eeh struct gem_softc *sc = arg; 636 1.1 eeh int s; 637 1.1 eeh 638 1.68 jdc if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) { 639 1.68 jdc /* 640 1.68 jdc * We have to reset 
everything if we failed to get a 641 1.68 jdc * PCS interrupt. Restarting the callout is handled 642 1.68 jdc * in gem_pcs_start(). 643 1.68 jdc */ 644 1.68 jdc gem_init(&sc->sc_ethercom.ec_if); 645 1.68 jdc } else { 646 1.68 jdc s = splnet(); 647 1.68 jdc mii_tick(&sc->sc_mii); 648 1.68 jdc splx(s); 649 1.128 thorpej callout_schedule(&sc->sc_tick_ch, hz); 650 1.68 jdc } 651 1.1 eeh } 652 1.1 eeh 653 1.41 christos static int 654 1.112 msaitoh gem_bitwait(struct gem_softc *sc, bus_space_handle_t h, int r, uint32_t clr, 655 1.112 msaitoh uint32_t set) 656 1.41 christos { 657 1.41 christos int i; 658 1.112 msaitoh uint32_t reg; 659 1.46 blymn 660 1.41 christos for (i = TRIES; i--; DELAY(100)) { 661 1.50 martin reg = bus_space_read_4(sc->sc_bustag, h, r); 662 1.50 martin if ((reg & clr) == 0 && (reg & set) == set) 663 1.41 christos return (1); 664 1.41 christos } 665 1.41 christos return (0); 666 1.41 christos } 667 1.41 christos 668 1.1 eeh void 669 1.81 dsl gem_reset(struct gem_softc *sc) 670 1.1 eeh { 671 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 672 1.50 martin bus_space_handle_t h = sc->sc_h2; 673 1.1 eeh int s; 674 1.1 eeh 675 1.1 eeh s = splnet(); 676 1.85 dyoung DPRINTF(sc, ("%s: gem_reset\n", device_xname(sc->sc_dev))); 677 1.1 eeh gem_reset_rx(sc); 678 1.1 eeh gem_reset_tx(sc); 679 1.1 eeh 680 1.1 eeh /* Do a full reset */ 681 1.117 msaitoh bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 682 1.50 martin if (!gem_bitwait(sc, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 683 1.85 dyoung aprint_error_dev(sc->sc_dev, "cannot reset device\n"); 684 1.1 eeh splx(s); 685 1.1 eeh } 686 1.1 eeh 687 1.1 eeh 688 1.1 eeh /* 689 1.1 eeh * gem_rxdrain: 690 1.1 eeh * 691 1.1 eeh * Drain the receive queue. 
692 1.1 eeh */ 693 1.41 christos static void 694 1.1 eeh gem_rxdrain(struct gem_softc *sc) 695 1.1 eeh { 696 1.1 eeh struct gem_rxsoft *rxs; 697 1.1 eeh int i; 698 1.1 eeh 699 1.1 eeh for (i = 0; i < GEM_NRXDESC; i++) { 700 1.1 eeh rxs = &sc->sc_rxsoft[i]; 701 1.1 eeh if (rxs->rxs_mbuf != NULL) { 702 1.41 christos bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0, 703 1.41 christos rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 704 1.1 eeh bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); 705 1.1 eeh m_freem(rxs->rxs_mbuf); 706 1.1 eeh rxs->rxs_mbuf = NULL; 707 1.1 eeh } 708 1.1 eeh } 709 1.1 eeh } 710 1.1 eeh 711 1.31 heas /* 712 1.1 eeh * Reset the whole thing. 713 1.1 eeh */ 714 1.41 christos static void 715 1.1 eeh gem_stop(struct ifnet *ifp, int disable) 716 1.1 eeh { 717 1.85 dyoung struct gem_softc *sc = ifp->if_softc; 718 1.1 eeh struct gem_txsoft *txs; 719 1.1 eeh 720 1.85 dyoung DPRINTF(sc, ("%s: gem_stop\n", device_xname(sc->sc_dev))); 721 1.1 eeh 722 1.95 martin callout_halt(&sc->sc_tick_ch, NULL); 723 1.101 jdc callout_halt(&sc->sc_rx_watchdog, NULL); 724 1.68 jdc if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 725 1.68 jdc gem_pcs_stop(sc, disable); 726 1.68 jdc else 727 1.68 jdc mii_down(&sc->sc_mii); 728 1.1 eeh 729 1.1 eeh /* XXX - Should we reset these instead? */ 730 1.68 jdc gem_disable_tx(sc); 731 1.1 eeh gem_disable_rx(sc); 732 1.1 eeh 733 1.1 eeh /* 734 1.1 eeh * Release any queued transmit buffers. 
735 1.1 eeh */ 736 1.1 eeh while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 737 1.21 lukem SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 738 1.1 eeh if (txs->txs_mbuf != NULL) { 739 1.41 christos bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0, 740 1.41 christos txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 741 1.1 eeh bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap); 742 1.1 eeh m_freem(txs->txs_mbuf); 743 1.1 eeh txs->txs_mbuf = NULL; 744 1.1 eeh } 745 1.1 eeh SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 746 1.1 eeh } 747 1.1 eeh 748 1.1 eeh /* 749 1.1 eeh * Mark the interface down and cancel the watchdog timer. 750 1.1 eeh */ 751 1.130 thorpej ifp->if_flags &= ~IFF_RUNNING; 752 1.41 christos sc->sc_if_flags = ifp->if_flags; 753 1.1 eeh ifp->if_timer = 0; 754 1.75 dyoung 755 1.75 dyoung if (disable) 756 1.75 dyoung gem_rxdrain(sc); 757 1.1 eeh } 758 1.1 eeh 759 1.1 eeh 760 1.1 eeh /* 761 1.1 eeh * Reset the receiver 762 1.1 eeh */ 763 1.1 eeh int 764 1.1 eeh gem_reset_rx(struct gem_softc *sc) 765 1.1 eeh { 766 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 767 1.50 martin bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 768 1.1 eeh 769 1.1 eeh /* 770 1.1 eeh * Resetting while DMA is in progress can cause a bus hang, so we 771 1.1 eeh * disable DMA first. 772 1.1 eeh */ 773 1.1 eeh gem_disable_rx(sc); 774 1.1 eeh bus_space_write_4(t, h, GEM_RX_CONFIG, 0); 775 1.68 jdc bus_space_barrier(t, h, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 776 1.1 eeh /* Wait till it finishes */ 777 1.50 martin if (!gem_bitwait(sc, h, GEM_RX_CONFIG, 1, 0)) 778 1.123 msaitoh aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n"); 779 1.99 jdc /* Wait 5ms extra. 
*/ 780 1.99 jdc delay(5000); 781 1.1 eeh 782 1.1 eeh /* Finally, reset the ERX */ 783 1.50 martin bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_RX); 784 1.68 jdc bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 785 1.1 eeh /* Wait till it finishes */ 786 1.50 martin if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_RX, 0)) { 787 1.85 dyoung aprint_error_dev(sc->sc_dev, "cannot reset receiver\n"); 788 1.1 eeh return (1); 789 1.1 eeh } 790 1.1 eeh return (0); 791 1.1 eeh } 792 1.1 eeh 793 1.1 eeh 794 1.1 eeh /* 795 1.68 jdc * Reset the receiver DMA engine. 796 1.68 jdc * 797 1.68 jdc * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 798 1.68 jdc * etc in order to reset the receiver DMA engine only and not do a full 799 1.68 jdc * reset which amongst others also downs the link and clears the FIFOs. 800 1.68 jdc */ 801 1.68 jdc static void 802 1.68 jdc gem_reset_rxdma(struct gem_softc *sc) 803 1.68 jdc { 804 1.68 jdc struct ifnet *ifp = &sc->sc_ethercom.ec_if; 805 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 806 1.68 jdc bus_space_handle_t h = sc->sc_h1; 807 1.68 jdc int i; 808 1.68 jdc 809 1.68 jdc if (gem_reset_rx(sc) != 0) { 810 1.68 jdc gem_init(ifp); 811 1.68 jdc return; 812 1.68 jdc } 813 1.68 jdc for (i = 0; i < GEM_NRXDESC; i++) 814 1.68 jdc if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 815 1.68 jdc GEM_UPDATE_RXDESC(sc, i); 816 1.68 jdc sc->sc_rxptr = 0; 817 1.68 jdc GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 818 1.68 jdc GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 819 1.68 jdc 820 1.68 jdc /* Reprogram Descriptor Ring Base Addresses */ 821 1.129 thorpej bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 822 1.129 thorpej ((uint64_t)GEM_CDRXADDR(sc, 0)) >> 32); 823 1.68 jdc bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 824 1.68 jdc 825 1.68 jdc /* Redo ERX Configuration */ 826 1.68 jdc gem_rx_common(sc); 827 1.68 jdc 828 1.120 msaitoh /* Give the receiver a swift kick */ 829 1.68 jdc bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC 
- 4); 830 1.68 jdc } 831 1.68 jdc 832 1.68 jdc /* 833 1.68 jdc * Common RX configuration for gem_init() and gem_reset_rxdma(). 834 1.68 jdc */ 835 1.68 jdc static void 836 1.68 jdc gem_rx_common(struct gem_softc *sc) 837 1.68 jdc { 838 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 839 1.68 jdc bus_space_handle_t h = sc->sc_h1; 840 1.112 msaitoh uint32_t v; 841 1.68 jdc 842 1.68 jdc /* Encode Receive Descriptor ring size: four possible values */ 843 1.68 jdc v = gem_ringsize(GEM_NRXDESC /*XXX*/); 844 1.68 jdc 845 1.68 jdc /* Set receive h/w checksum offset */ 846 1.68 jdc #ifdef INET 847 1.68 jdc v |= (ETHER_HDR_LEN + sizeof(struct ip) + 848 1.68 jdc ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ? 849 1.68 jdc ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT; 850 1.68 jdc #endif 851 1.68 jdc 852 1.68 jdc /* Enable RX DMA */ 853 1.68 jdc bus_space_write_4(t, h, GEM_RX_CONFIG, 854 1.68 jdc v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 855 1.68 jdc (2 << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN); 856 1.68 jdc 857 1.68 jdc /* 858 1.68 jdc * The following value is for an OFF Threshold of about 3/4 full 859 1.68 jdc * and an ON Threshold of 1/4 full. 860 1.68 jdc */ 861 1.68 jdc bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH, 862 1.68 jdc (3 * sc->sc_rxfifosize / 256) | 863 1.68 jdc ((sc->sc_rxfifosize / 256) << 12)); 864 1.68 jdc bus_space_write_4(t, h, GEM_RX_BLANKING, 865 1.99 jdc (6 << GEM_RX_BLANKING_TIME_SHIFT) | 8); 866 1.68 jdc } 867 1.68 jdc 868 1.68 jdc /* 869 1.1 eeh * Reset the transmitter 870 1.1 eeh */ 871 1.1 eeh int 872 1.1 eeh gem_reset_tx(struct gem_softc *sc) 873 1.1 eeh { 874 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 875 1.50 martin bus_space_handle_t h = sc->sc_h1, h2 = sc->sc_h2; 876 1.1 eeh 877 1.1 eeh /* 878 1.1 eeh * Resetting while DMA is in progress can cause a bus hang, so we 879 1.1 eeh * disable DMA first. 
880 1.1 eeh */ 881 1.1 eeh gem_disable_tx(sc); 882 1.1 eeh bus_space_write_4(t, h, GEM_TX_CONFIG, 0); 883 1.68 jdc bus_space_barrier(t, h, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 884 1.1 eeh /* Wait till it finishes */ 885 1.50 martin if (!gem_bitwait(sc, h, GEM_TX_CONFIG, 1, 0)) 886 1.131 msaitoh aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n"); 887 1.1 eeh /* Wait 5ms extra. */ 888 1.1 eeh delay(5000); 889 1.1 eeh 890 1.1 eeh /* Finally, reset the ETX */ 891 1.50 martin bus_space_write_4(t, h2, GEM_RESET, GEM_RESET_TX); 892 1.68 jdc bus_space_barrier(t, h, GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE); 893 1.1 eeh /* Wait till it finishes */ 894 1.50 martin if (!gem_bitwait(sc, h2, GEM_RESET, GEM_RESET_TX, 0)) { 895 1.131 msaitoh aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n"); 896 1.1 eeh return (1); 897 1.1 eeh } 898 1.1 eeh return (0); 899 1.1 eeh } 900 1.1 eeh 901 1.1 eeh /* 902 1.1 eeh * disable receiver. 903 1.1 eeh */ 904 1.1 eeh int 905 1.1 eeh gem_disable_rx(struct gem_softc *sc) 906 1.1 eeh { 907 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 908 1.50 martin bus_space_handle_t h = sc->sc_h1; 909 1.112 msaitoh uint32_t cfg; 910 1.1 eeh 911 1.1 eeh /* Flip the enable bit */ 912 1.1 eeh cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 913 1.1 eeh cfg &= ~GEM_MAC_RX_ENABLE; 914 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); 915 1.68 jdc bus_space_barrier(t, h, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 916 1.1 eeh /* Wait for it to finish */ 917 1.50 martin return (gem_bitwait(sc, h, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); 918 1.1 eeh } 919 1.1 eeh 920 1.1 eeh /* 921 1.1 eeh * disable transmitter. 
922 1.1 eeh */ 923 1.1 eeh int 924 1.1 eeh gem_disable_tx(struct gem_softc *sc) 925 1.1 eeh { 926 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 927 1.50 martin bus_space_handle_t h = sc->sc_h1; 928 1.112 msaitoh uint32_t cfg; 929 1.1 eeh 930 1.1 eeh /* Flip the enable bit */ 931 1.1 eeh cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); 932 1.1 eeh cfg &= ~GEM_MAC_TX_ENABLE; 933 1.1 eeh bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); 934 1.68 jdc bus_space_barrier(t, h, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); 935 1.1 eeh /* Wait for it to finish */ 936 1.50 martin return (gem_bitwait(sc, h, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); 937 1.1 eeh } 938 1.1 eeh 939 1.1 eeh /* 940 1.1 eeh * Initialize interface. 941 1.1 eeh */ 942 1.1 eeh int 943 1.1 eeh gem_meminit(struct gem_softc *sc) 944 1.1 eeh { 945 1.1 eeh struct gem_rxsoft *rxs; 946 1.1 eeh int i, error; 947 1.1 eeh 948 1.1 eeh /* 949 1.1 eeh * Initialize the transmit descriptor ring. 950 1.1 eeh */ 951 1.85 dyoung memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 952 1.1 eeh for (i = 0; i < GEM_NTXDESC; i++) { 953 1.1 eeh sc->sc_txdescs[i].gd_flags = 0; 954 1.1 eeh sc->sc_txdescs[i].gd_addr = 0; 955 1.1 eeh } 956 1.1 eeh GEM_CDTXSYNC(sc, 0, GEM_NTXDESC, 957 1.117 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 958 1.14 matt sc->sc_txfree = GEM_NTXDESC-1; 959 1.1 eeh sc->sc_txnext = 0; 960 1.14 matt sc->sc_txwin = 0; 961 1.1 eeh 962 1.1 eeh /* 963 1.1 eeh * Initialize the receive descriptor and receive job 964 1.1 eeh * descriptor rings. 
965 1.1 eeh */ 966 1.1 eeh for (i = 0; i < GEM_NRXDESC; i++) { 967 1.1 eeh rxs = &sc->sc_rxsoft[i]; 968 1.1 eeh if (rxs->rxs_mbuf == NULL) { 969 1.1 eeh if ((error = gem_add_rxbuf(sc, i)) != 0) { 970 1.85 dyoung aprint_error_dev(sc->sc_dev, 971 1.85 dyoung "unable to allocate or map rx " 972 1.1 eeh "buffer %d, error = %d\n", 973 1.76 cegger i, error); 974 1.1 eeh /* 975 1.1 eeh * XXX Should attempt to run with fewer receive 976 1.1 eeh * XXX buffers instead of just failing. 977 1.1 eeh */ 978 1.1 eeh gem_rxdrain(sc); 979 1.1 eeh return (1); 980 1.1 eeh } 981 1.1 eeh } else 982 1.1 eeh GEM_INIT_RXDESC(sc, i); 983 1.1 eeh } 984 1.1 eeh sc->sc_rxptr = 0; 985 1.68 jdc sc->sc_meminited = 1; 986 1.68 jdc GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); 987 1.68 jdc GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); 988 1.1 eeh 989 1.1 eeh return (0); 990 1.1 eeh } 991 1.1 eeh 992 1.1 eeh static int 993 1.1 eeh gem_ringsize(int sz) 994 1.1 eeh { 995 1.1 eeh switch (sz) { 996 1.1 eeh case 32: 997 1.29 christos return GEM_RING_SZ_32; 998 1.1 eeh case 64: 999 1.29 christos return GEM_RING_SZ_64; 1000 1.1 eeh case 128: 1001 1.29 christos return GEM_RING_SZ_128; 1002 1.1 eeh case 256: 1003 1.29 christos return GEM_RING_SZ_256; 1004 1.1 eeh case 512: 1005 1.29 christos return GEM_RING_SZ_512; 1006 1.1 eeh case 1024: 1007 1.29 christos return GEM_RING_SZ_1024; 1008 1.1 eeh case 2048: 1009 1.29 christos return GEM_RING_SZ_2048; 1010 1.1 eeh case 4096: 1011 1.29 christos return GEM_RING_SZ_4096; 1012 1.1 eeh case 8192: 1013 1.29 christos return GEM_RING_SZ_8192; 1014 1.1 eeh default: 1015 1.29 christos printf("gem: invalid Receive Descriptor ring size %d\n", sz); 1016 1.29 christos return GEM_RING_SZ_32; 1017 1.1 eeh } 1018 1.1 eeh } 1019 1.1 eeh 1020 1.68 jdc 1021 1.68 jdc /* 1022 1.68 jdc * Start PCS 1023 1.68 jdc */ 1024 1.68 jdc void 1025 1.68 jdc gem_pcs_start(struct gem_softc *sc) 1026 1.68 jdc { 1027 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 1028 1.68 jdc bus_space_handle_t h = sc->sc_h1; 1029 
1.68 jdc uint32_t v; 1030 1.68 jdc 1031 1.68 jdc #ifdef GEM_DEBUG 1032 1.85 dyoung aprint_debug_dev(sc->sc_dev, "gem_pcs_start()\n"); 1033 1.68 jdc #endif 1034 1.68 jdc 1035 1.68 jdc /* 1036 1.68 jdc * Set up. We must disable the MII before modifying the 1037 1.68 jdc * GEM_MII_ANAR register 1038 1.68 jdc */ 1039 1.68 jdc if (sc->sc_flags & GEM_SERDES) { 1040 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 1041 1.68 jdc GEM_MII_DATAPATH_SERDES); 1042 1.68 jdc bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 1043 1.68 jdc GEM_MII_SLINK_LOOPBACK); 1044 1.68 jdc } else { 1045 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 1046 1.68 jdc GEM_MII_DATAPATH_SERIAL); 1047 1.68 jdc bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 0); 1048 1.68 jdc } 1049 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 1050 1.68 jdc v = bus_space_read_4(t, h, GEM_MII_ANAR); 1051 1.68 jdc v |= (GEM_MII_ANEG_SYM_PAUSE | GEM_MII_ANEG_ASYM_PAUSE); 1052 1.115 msaitoh if (IFM_SUBTYPE(sc->sc_mii_media) == IFM_AUTO) 1053 1.68 jdc v |= (GEM_MII_ANEG_FUL_DUPLX | GEM_MII_ANEG_HLF_DUPLX); 1054 1.115 msaitoh else if ((IFM_OPTIONS(sc->sc_mii_media) & IFM_FDX) != 0) { 1055 1.68 jdc v |= GEM_MII_ANEG_FUL_DUPLX; 1056 1.68 jdc v &= ~GEM_MII_ANEG_HLF_DUPLX; 1057 1.115 msaitoh } else if ((IFM_OPTIONS(sc->sc_mii_media) & IFM_HDX) != 0) { 1058 1.68 jdc v &= ~GEM_MII_ANEG_FUL_DUPLX; 1059 1.68 jdc v |= GEM_MII_ANEG_HLF_DUPLX; 1060 1.68 jdc } 1061 1.68 jdc 1062 1.68 jdc /* Configure link. 
*/ 1063 1.68 jdc bus_space_write_4(t, h, GEM_MII_ANAR, v); 1064 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONTROL, 1065 1.68 jdc GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN); 1066 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 1067 1.68 jdc gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_ANEG_CPT); 1068 1.68 jdc 1069 1.68 jdc /* Start the 10 second timer */ 1070 1.128 thorpej callout_schedule(&sc->sc_tick_ch, hz * 10); 1071 1.68 jdc } 1072 1.68 jdc 1073 1.68 jdc /* 1074 1.68 jdc * Stop PCS 1075 1.68 jdc */ 1076 1.68 jdc void 1077 1.68 jdc gem_pcs_stop(struct gem_softc *sc, int disable) 1078 1.68 jdc { 1079 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 1080 1.68 jdc bus_space_handle_t h = sc->sc_h1; 1081 1.68 jdc 1082 1.68 jdc #ifdef GEM_DEBUG 1083 1.85 dyoung aprint_debug_dev(sc->sc_dev, "gem_pcs_stop()\n"); 1084 1.68 jdc #endif 1085 1.68 jdc 1086 1.68 jdc /* Tell link partner that we're going away */ 1087 1.68 jdc bus_space_write_4(t, h, GEM_MII_ANAR, GEM_MII_ANEG_RF); 1088 1.68 jdc 1089 1.68 jdc /* 1090 1.68 jdc * Disable PCS MII. The documentation suggests that setting 1091 1.68 jdc * GEM_MII_CONFIG_ENABLE to zero and then restarting auto- 1092 1.68 jdc * negotiation will shut down the link. However, it appears 1093 1.68 jdc * that we also need to unset the datapath mode. 
1094 1.68 jdc */ 1095 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 1096 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONTROL, 1097 1.68 jdc GEM_MII_CONTROL_AUTONEG | GEM_MII_CONTROL_RAN); 1098 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII); 1099 1.68 jdc bus_space_write_4(t, h, GEM_MII_CONFIG, 0); 1100 1.68 jdc 1101 1.68 jdc if (disable) { 1102 1.68 jdc if (sc->sc_flags & GEM_SERDES) 1103 1.68 jdc bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 1104 1.68 jdc GEM_MII_SLINK_POWER_OFF); 1105 1.68 jdc else 1106 1.68 jdc bus_space_write_4(t, h, GEM_MII_SLINK_CONTROL, 1107 1.68 jdc GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_POWER_OFF); 1108 1.68 jdc } 1109 1.68 jdc 1110 1.68 jdc sc->sc_flags &= ~GEM_LINK; 1111 1.68 jdc sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE; 1112 1.68 jdc sc->sc_mii.mii_media_status = IFM_AVALID; 1113 1.68 jdc } 1114 1.68 jdc 1115 1.68 jdc 1116 1.1 eeh /* 1117 1.1 eeh * Initialization of interface; set up initialization block 1118 1.1 eeh * and transmit/receive descriptor rings. 1119 1.1 eeh */ 1120 1.1 eeh int 1121 1.1 eeh gem_init(struct ifnet *ifp) 1122 1.1 eeh { 1123 1.85 dyoung struct gem_softc *sc = ifp->if_softc; 1124 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 1125 1.50 martin bus_space_handle_t h = sc->sc_h1; 1126 1.69 dyoung int rc = 0, s; 1127 1.15 matt u_int max_frame_size; 1128 1.112 msaitoh uint32_t v; 1129 1.1 eeh 1130 1.1 eeh s = splnet(); 1131 1.1 eeh 1132 1.85 dyoung DPRINTF(sc, ("%s: gem_init: calling stop\n", device_xname(sc->sc_dev))); 1133 1.1 eeh /* 1134 1.1 eeh * Initialization sequence. The numbered steps below correspond 1135 1.1 eeh * to the sequence outlined in section 6.3.5.1 in the Ethernet 1136 1.1 eeh * Channel Engine manual (part of the PCIO manual). 1137 1.1 eeh * See also the STP2002-STQ document from Sun Microsystems. 1138 1.1 eeh */ 1139 1.1 eeh 1140 1.1 eeh /* step 1 & 2. 
Reset the Ethernet Channel */ 1141 1.1 eeh gem_stop(ifp, 0); 1142 1.1 eeh gem_reset(sc); 1143 1.85 dyoung DPRINTF(sc, ("%s: gem_init: restarting\n", device_xname(sc->sc_dev))); 1144 1.1 eeh 1145 1.1 eeh /* Re-initialize the MIF */ 1146 1.1 eeh gem_mifinit(sc); 1147 1.1 eeh 1148 1.68 jdc /* Set up correct datapath for non-SERDES/Serialink */ 1149 1.68 jdc if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 && 1150 1.68 jdc sc->sc_variant != GEM_SUN_ERI) 1151 1.68 jdc bus_space_write_4(t, h, GEM_MII_DATAPATH_MODE, 1152 1.68 jdc GEM_MII_DATAPATH_MII); 1153 1.68 jdc 1154 1.1 eeh /* Call MI reset function if any */ 1155 1.1 eeh if (sc->sc_hwreset) 1156 1.1 eeh (*sc->sc_hwreset)(sc); 1157 1.1 eeh 1158 1.1 eeh /* step 3. Setup data structures in host memory */ 1159 1.103 dholland if (gem_meminit(sc) != 0) { 1160 1.103 dholland splx(s); 1161 1.68 jdc return 1; 1162 1.103 dholland } 1163 1.1 eeh 1164 1.1 eeh /* step 4. TX MAC registers & counters */ 1165 1.1 eeh gem_init_regs(sc); 1166 1.111 riastrad max_frame_size = uimax(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU); 1167 1.15 matt max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN; 1168 1.15 matt if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) 1169 1.15 matt max_frame_size += ETHER_VLAN_ENCAP_LEN; 1170 1.1 eeh bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1171 1.15 matt max_frame_size|/* burst size */(0x2000<<16)); 1172 1.1 eeh 1173 1.1 eeh /* step 5. RX MAC registers & counters */ 1174 1.1 eeh gem_setladrf(sc); 1175 1.1 eeh 1176 1.1 eeh /* step 6 & 7. 
Program Descriptor Ring Base Addresses */ 1177 1.129 thorpej bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 1178 1.129 thorpej ((uint64_t)GEM_CDTXADDR(sc, 0)) >> 32); 1179 1.4 thorpej bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 1180 1.4 thorpej 1181 1.129 thorpej bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 1182 1.129 thorpej ((uint64_t)GEM_CDRXADDR(sc, 0)) >> 32); 1183 1.4 thorpej bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 1184 1.1 eeh 1185 1.1 eeh /* step 8. Global Configuration & Interrupt Mask */ 1186 1.85 dyoung gem_inten(sc); 1187 1.16 matt bus_space_write_4(t, h, GEM_MAC_RX_MASK, 1188 1.68 jdc GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 1189 1.68 jdc bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXX */ 1190 1.68 jdc bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 1191 1.68 jdc GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 1192 1.5 thorpej 1193 1.1 eeh /* step 9. ETX Configuration: use mostly default values */ 1194 1.1 eeh 1195 1.68 jdc /* Enable TX DMA */ 1196 1.1 eeh v = gem_ringsize(GEM_NTXDESC /*XXX*/); 1197 1.31 heas bus_space_write_4(t, h, GEM_TX_CONFIG, 1198 1.87 jdc v | GEM_TX_CONFIG_TXDMA_EN | 1199 1.87 jdc (((sc->sc_flags & GEM_GIGABIT ? 0x4FF : 0x100) << 10) & 1200 1.87 jdc GEM_TX_CONFIG_TXFIFO_TH)); 1201 1.1 eeh bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext); 1202 1.1 eeh 1203 1.1 eeh /* step 10. ERX Configuration */ 1204 1.68 jdc gem_rx_common(sc); 1205 1.1 eeh 1206 1.1 eeh /* step 11. Configure Media */ 1207 1.69 dyoung if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0 && 1208 1.69 dyoung (rc = mii_ifmedia_change(&sc->sc_mii)) != 0) 1209 1.69 dyoung goto out; 1210 1.1 eeh 1211 1.1 eeh /* step 12. RX_MAC Configuration Register */ 1212 1.1 eeh v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 1213 1.35 heas v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC; 1214 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 1215 1.1 eeh 1216 1.1 eeh /* step 14. 
Issue Transmit Pending command */ 1217 1.1 eeh 1218 1.1 eeh /* Call MI initialization function if any */ 1219 1.1 eeh if (sc->sc_hwinit) 1220 1.1 eeh (*sc->sc_hwinit)(sc); 1221 1.1 eeh 1222 1.120 msaitoh /* step 15. Give the receiver a swift kick */ 1223 1.1 eeh bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); 1224 1.1 eeh 1225 1.68 jdc if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 1226 1.68 jdc /* Configure PCS */ 1227 1.68 jdc gem_pcs_start(sc); 1228 1.68 jdc else 1229 1.68 jdc /* Start the one second timer. */ 1230 1.128 thorpej callout_schedule(&sc->sc_tick_ch, hz); 1231 1.1 eeh 1232 1.68 jdc sc->sc_flags &= ~GEM_LINK; 1233 1.1 eeh ifp->if_flags |= IFF_RUNNING; 1234 1.1 eeh ifp->if_timer = 0; 1235 1.41 christos sc->sc_if_flags = ifp->if_flags; 1236 1.69 dyoung out: 1237 1.1 eeh splx(s); 1238 1.1 eeh 1239 1.1 eeh return (0); 1240 1.1 eeh } 1241 1.1 eeh 1242 1.1 eeh void 1243 1.1 eeh gem_init_regs(struct gem_softc *sc) 1244 1.1 eeh { 1245 1.1 eeh struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1246 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 1247 1.50 martin bus_space_handle_t h = sc->sc_h1; 1248 1.58 dyoung const u_char *laddr = CLLADDR(ifp->if_sadl); 1249 1.112 msaitoh uint32_t v; 1250 1.1 eeh 1251 1.1 eeh /* These regs are not cleared on reset */ 1252 1.1 eeh if (!sc->sc_inited) { 1253 1.1 eeh 1254 1.68 jdc /* Load recommended values */ 1255 1.68 jdc bus_space_write_4(t, h, GEM_MAC_IPG0, 0x00); 1256 1.68 jdc bus_space_write_4(t, h, GEM_MAC_IPG1, 0x08); 1257 1.68 jdc bus_space_write_4(t, h, GEM_MAC_IPG2, 0x04); 1258 1.1 eeh 1259 1.1 eeh bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1260 1.1 eeh /* Max frame and max burst size */ 1261 1.1 eeh bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, 1262 1.68 jdc ETHER_MAX_LEN | (0x2000<<16)); 1263 1.15 matt 1264 1.68 jdc bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x07); 1265 1.68 jdc bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x04); 1266 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 
0x10); 1267 1.1 eeh bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); 1268 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, 1269 1.15 matt ((laddr[5]<<8)|laddr[4])&0x3ff); 1270 1.13 matt 1271 1.1 eeh /* Secondary MAC addr set to 0:0:0:0:0:0 */ 1272 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); 1273 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); 1274 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); 1275 1.13 matt 1276 1.13 matt /* MAC control addr set to 01:80:c2:00:00:01 */ 1277 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); 1278 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); 1279 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); 1280 1.1 eeh 1281 1.1 eeh /* MAC filter addr set to 0:0:0:0:0:0 */ 1282 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); 1283 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); 1284 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); 1285 1.1 eeh 1286 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); 1287 1.1 eeh bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); 1288 1.1 eeh 1289 1.1 eeh sc->sc_inited = 1; 1290 1.1 eeh } 1291 1.1 eeh 1292 1.1 eeh /* Counters need to be zeroed */ 1293 1.1 eeh bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); 1294 1.1 eeh bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); 1295 1.1 eeh bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); 1296 1.1 eeh bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); 1297 1.1 eeh bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); 1298 1.1 eeh bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); 1299 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); 1300 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); 1301 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); 1302 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); 1303 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); 1304 1.1 eeh 1305 1.68 jdc /* Set XOFF PAUSE time. 
*/ 1306 1.1 eeh bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 1307 1.68 jdc 1308 1.68 jdc /* 1309 1.68 jdc * Set the internal arbitration to "infinite" bursts of the 1310 1.68 jdc * maximum length of 31 * 64 bytes so DMA transfers aren't 1311 1.68 jdc * split up in cache line size chunks. This greatly improves 1312 1.68 jdc * especially RX performance. 1313 1.68 jdc * Enable silicon bug workarounds for the Apple variants. 1314 1.68 jdc */ 1315 1.68 jdc bus_space_write_4(t, h, GEM_CONFIG, 1316 1.68 jdc GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 1317 1.78 jdc ((sc->sc_flags & GEM_PCI) ? 1318 1.78 jdc GEM_CONFIG_BURST_INF : GEM_CONFIG_BURST_64) | (GEM_IS_APPLE(sc) ? 1319 1.68 jdc GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 1320 1.1 eeh 1321 1.1 eeh /* 1322 1.1 eeh * Set the station address. 1323 1.1 eeh */ 1324 1.13 matt bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); 1325 1.13 matt bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); 1326 1.13 matt bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); 1327 1.1 eeh 1328 1.15 matt /* 1329 1.15 matt * Enable MII outputs. Enable GMII if there is a gigabit PHY. 
1330 1.15 matt */ 1331 1.70 jdc sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); 1332 1.15 matt v = GEM_MAC_XIF_TX_MII_ENA; 1333 1.118 msaitoh if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0) { 1334 1.70 jdc if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { 1335 1.70 jdc v |= GEM_MAC_XIF_FDPLX_LED; 1336 1.70 jdc if (sc->sc_flags & GEM_GIGABIT) 1337 1.70 jdc v |= GEM_MAC_XIF_GMII_MODE; 1338 1.70 jdc } 1339 1.70 jdc } else { 1340 1.68 jdc v |= GEM_MAC_XIF_GMII_MODE; 1341 1.70 jdc } 1342 1.15 matt bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); 1343 1.1 eeh } 1344 1.1 eeh 1345 1.67 dyoung #ifdef GEM_DEBUG 1346 1.67 dyoung static void 1347 1.67 dyoung gem_txsoft_print(const struct gem_softc *sc, int firstdesc, int lastdesc) 1348 1.67 dyoung { 1349 1.67 dyoung int i; 1350 1.67 dyoung 1351 1.67 dyoung for (i = firstdesc;; i = GEM_NEXTTX(i)) { 1352 1.67 dyoung printf("descriptor %d:\t", i); 1353 1.67 dyoung printf("gd_flags: 0x%016" PRIx64 "\t", 1354 1.67 dyoung GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); 1355 1.67 dyoung printf("gd_addr: 0x%016" PRIx64 "\n", 1356 1.67 dyoung GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); 1357 1.67 dyoung if (i == lastdesc) 1358 1.67 dyoung break; 1359 1.67 dyoung } 1360 1.67 dyoung } 1361 1.67 dyoung #endif 1362 1.67 dyoung 1363 1.41 christos static void 1364 1.81 dsl gem_start(struct ifnet *ifp) 1365 1.1 eeh { 1366 1.85 dyoung struct gem_softc *sc = ifp->if_softc; 1367 1.1 eeh struct mbuf *m0, *m; 1368 1.64 dyoung struct gem_txsoft *txs; 1369 1.1 eeh bus_dmamap_t dmamap; 1370 1.49 martin int error, firsttx, nexttx = -1, lasttx = -1, ofree, seg; 1371 1.105 jdc #ifdef GEM_DEBUG 1372 1.105 jdc int otxnext; 1373 1.105 jdc #endif 1374 1.40 bouyer uint64_t flags = 0; 1375 1.1 eeh 1376 1.130 thorpej if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING) 1377 1.1 eeh return; 1378 1.1 eeh 1379 1.1 eeh /* 1380 1.1 eeh * Remember the previous number of free descriptors and 1381 1.1 eeh * the first descriptor we'll use. 
1382 1.1 eeh */ 1383 1.1 eeh ofree = sc->sc_txfree; 1384 1.105 jdc #ifdef GEM_DEBUG 1385 1.105 jdc otxnext = sc->sc_txnext; 1386 1.105 jdc #endif 1387 1.1 eeh 1388 1.1 eeh DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n", 1389 1.105 jdc device_xname(sc->sc_dev), ofree, otxnext)); 1390 1.1 eeh 1391 1.1 eeh /* 1392 1.1 eeh * Loop through the send queue, setting up transmit descriptors 1393 1.1 eeh * until we drain the queue, or use up all available transmit 1394 1.1 eeh * descriptors. 1395 1.1 eeh */ 1396 1.126 christos #ifdef INET 1397 1.124 msaitoh next: 1398 1.126 christos #endif 1399 1.11 thorpej while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL && 1400 1.68 jdc sc->sc_txfree != 0) { 1401 1.1 eeh /* 1402 1.1 eeh * Grab a packet off the queue. 1403 1.1 eeh */ 1404 1.1 eeh IFQ_POLL(&ifp->if_snd, m0); 1405 1.1 eeh if (m0 == NULL) 1406 1.1 eeh break; 1407 1.1 eeh m = NULL; 1408 1.1 eeh 1409 1.1 eeh dmamap = txs->txs_dmamap; 1410 1.1 eeh 1411 1.1 eeh /* 1412 1.1 eeh * Load the DMA map. If this fails, the packet either 1413 1.136 andvar * didn't fit in the allotted number of segments, or we were 1414 1.1 eeh * short on resources. In this case, we'll copy and try 1415 1.1 eeh * again. 
1416 1.1 eeh */ 1417 1.1 eeh if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0, 1418 1.117 msaitoh BUS_DMA_WRITE | BUS_DMA_NOWAIT) != 0 || 1419 1.40 bouyer (m0->m_pkthdr.len < ETHER_MIN_TX && 1420 1.40 bouyer dmamap->dm_nsegs == GEM_NTXSEGS)) { 1421 1.15 matt if (m0->m_pkthdr.len > MCLBYTES) { 1422 1.85 dyoung aprint_error_dev(sc->sc_dev, 1423 1.85 dyoung "unable to allocate jumbo Tx cluster\n"); 1424 1.15 matt IFQ_DEQUEUE(&ifp->if_snd, m0); 1425 1.15 matt m_freem(m0); 1426 1.15 matt continue; 1427 1.15 matt } 1428 1.1 eeh MGETHDR(m, M_DONTWAIT, MT_DATA); 1429 1.1 eeh if (m == NULL) { 1430 1.85 dyoung aprint_error_dev(sc->sc_dev, 1431 1.85 dyoung "unable to allocate Tx mbuf\n"); 1432 1.1 eeh break; 1433 1.1 eeh } 1434 1.26 matt MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner); 1435 1.1 eeh if (m0->m_pkthdr.len > MHLEN) { 1436 1.1 eeh MCLGET(m, M_DONTWAIT); 1437 1.1 eeh if ((m->m_flags & M_EXT) == 0) { 1438 1.85 dyoung aprint_error_dev(sc->sc_dev, 1439 1.85 dyoung "unable to allocate Tx cluster\n"); 1440 1.1 eeh m_freem(m); 1441 1.1 eeh break; 1442 1.1 eeh } 1443 1.1 eeh } 1444 1.53 christos m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 1445 1.1 eeh m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 1446 1.1 eeh error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, 1447 1.117 msaitoh m, BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1448 1.1 eeh if (error) { 1449 1.85 dyoung aprint_error_dev(sc->sc_dev, 1450 1.85 dyoung "unable to load Tx buffer, error = %d\n", 1451 1.85 dyoung error); 1452 1.1 eeh break; 1453 1.1 eeh } 1454 1.1 eeh } 1455 1.1 eeh 1456 1.1 eeh /* 1457 1.1 eeh * Ensure we have enough descriptors free to describe 1458 1.11 thorpej * the packet. 1459 1.1 eeh */ 1460 1.40 bouyer if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ? 1461 1.40 bouyer (sc->sc_txfree - 1) : sc->sc_txfree)) { 1462 1.1 eeh /* 1463 1.1 eeh * Not enough free descriptors to transmit this 1464 1.130 thorpej * packet. 
1465 1.1 eeh */ 1466 1.1 eeh bus_dmamap_unload(sc->sc_dmatag, dmamap); 1467 1.138 rin m_freem(m); 1468 1.1 eeh break; 1469 1.1 eeh } 1470 1.1 eeh 1471 1.1 eeh IFQ_DEQUEUE(&ifp->if_snd, m0); 1472 1.1 eeh if (m != NULL) { 1473 1.1 eeh m_freem(m0); 1474 1.1 eeh m0 = m; 1475 1.1 eeh } 1476 1.1 eeh 1477 1.1 eeh /* 1478 1.1 eeh * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1479 1.1 eeh */ 1480 1.1 eeh 1481 1.1 eeh /* Sync the DMA map. */ 1482 1.1 eeh bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize, 1483 1.1 eeh BUS_DMASYNC_PREWRITE); 1484 1.1 eeh 1485 1.1 eeh /* 1486 1.1 eeh * Initialize the transmit descriptors. 1487 1.1 eeh */ 1488 1.105 jdc firsttx = sc->sc_txnext; 1489 1.105 jdc for (nexttx = firsttx, seg = 0; 1490 1.1 eeh seg < dmamap->dm_nsegs; 1491 1.1 eeh seg++, nexttx = GEM_NEXTTX(nexttx)) { 1492 1.1 eeh 1493 1.1 eeh /* 1494 1.1 eeh * If this is the first descriptor we're 1495 1.1 eeh * enqueueing, set the start of packet flag, 1496 1.1 eeh * and the checksum stuff if we want the hardware 1497 1.1 eeh * to do it. 
1498 1.1 eeh */ 1499 1.1 eeh flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE; 1500 1.1 eeh if (nexttx == firsttx) { 1501 1.1 eeh flags |= GEM_TD_START_OF_PACKET; 1502 1.35 heas #ifdef INET 1503 1.35 heas /* h/w checksum */ 1504 1.68 jdc if (ifp->if_csum_flags_tx & M_CSUM_TCPv4 && 1505 1.68 jdc m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) { 1506 1.35 heas struct ether_header *eh; 1507 1.35 heas uint16_t offset, start; 1508 1.35 heas 1509 1.35 heas eh = mtod(m0, struct ether_header *); 1510 1.35 heas switch (ntohs(eh->ether_type)) { 1511 1.35 heas case ETHERTYPE_IP: 1512 1.35 heas start = ETHER_HDR_LEN; 1513 1.35 heas break; 1514 1.35 heas case ETHERTYPE_VLAN: 1515 1.35 heas start = ETHER_HDR_LEN + 1516 1.35 heas ETHER_VLAN_ENCAP_LEN; 1517 1.37 perry break; 1518 1.35 heas default: 1519 1.37 perry /* unsupported, drop it */ 1520 1.124 msaitoh bus_dmamap_unload(sc->sc_dmatag, 1521 1.124 msaitoh dmamap); 1522 1.124 msaitoh m_freem(m0); 1523 1.124 msaitoh goto next; 1524 1.35 heas } 1525 1.36 thorpej start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data); 1526 1.36 thorpej offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start; 1527 1.35 heas flags |= (start << 1528 1.35 heas GEM_TD_CXSUM_STARTSHFT) | 1529 1.35 heas (offset << 1530 1.35 heas GEM_TD_CXSUM_STUFFSHFT) | 1531 1.35 heas GEM_TD_CXSUM_ENABLE; 1532 1.35 heas } 1533 1.35 heas #endif 1534 1.124 msaitoh if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 1535 1.124 msaitoh sc->sc_txwin = 0; 1536 1.124 msaitoh flags |= GEM_TD_INTERRUPT_ME; 1537 1.124 msaitoh } 1538 1.1 eeh } 1539 1.124 msaitoh sc->sc_txdescs[nexttx].gd_addr = 1540 1.124 msaitoh GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr); 1541 1.1 eeh if (seg == dmamap->dm_nsegs - 1) { 1542 1.1 eeh flags |= GEM_TD_END_OF_PACKET; 1543 1.40 bouyer } else { 1544 1.40 bouyer /* last flag set outside of loop */ 1545 1.40 bouyer sc->sc_txdescs[nexttx].gd_flags = 1546 1.40 bouyer GEM_DMA_WRITE(sc, flags); 1547 1.1 eeh } 1548 1.1 eeh lasttx = nexttx; 1549 1.1 eeh } 
1550 1.40 bouyer if (m0->m_pkthdr.len < ETHER_MIN_TX) { 1551 1.40 bouyer /* add padding buffer at end of chain */ 1552 1.40 bouyer flags &= ~GEM_TD_END_OF_PACKET; 1553 1.40 bouyer sc->sc_txdescs[lasttx].gd_flags = 1554 1.40 bouyer GEM_DMA_WRITE(sc, flags); 1555 1.40 bouyer 1556 1.40 bouyer sc->sc_txdescs[nexttx].gd_addr = 1557 1.40 bouyer GEM_DMA_WRITE(sc, 1558 1.40 bouyer sc->sc_nulldmamap->dm_segs[0].ds_addr); 1559 1.40 bouyer flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) & 1560 1.40 bouyer GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET; 1561 1.40 bouyer lasttx = nexttx; 1562 1.40 bouyer nexttx = GEM_NEXTTX(nexttx); 1563 1.40 bouyer seg++; 1564 1.40 bouyer } 1565 1.40 bouyer sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags); 1566 1.30 christos 1567 1.30 christos KASSERT(lasttx != -1); 1568 1.1 eeh 1569 1.40 bouyer /* 1570 1.40 bouyer * Store a pointer to the packet so we can free it later, 1571 1.40 bouyer * and remember what txdirty will be once the packet is 1572 1.40 bouyer * done. 1573 1.40 bouyer */ 1574 1.40 bouyer txs->txs_mbuf = m0; 1575 1.40 bouyer txs->txs_firstdesc = sc->sc_txnext; 1576 1.40 bouyer txs->txs_lastdesc = lasttx; 1577 1.40 bouyer txs->txs_ndescs = seg; 1578 1.40 bouyer 1579 1.1 eeh #ifdef GEM_DEBUG 1580 1.1 eeh if (ifp->if_flags & IFF_DEBUG) { 1581 1.1 eeh printf(" gem_start %p transmit chain:\n", txs); 1582 1.67 dyoung gem_txsoft_print(sc, txs->txs_firstdesc, 1583 1.67 dyoung txs->txs_lastdesc); 1584 1.1 eeh } 1585 1.1 eeh #endif 1586 1.1 eeh 1587 1.1 eeh /* Sync the descriptors we're using. */ 1588 1.65 dyoung GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs, 1589 1.117 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1590 1.1 eeh 1591 1.1 eeh /* Advance the tx pointer. 
/*
 * Transmit interrupt.
 *
 * Harvests completed Tx descriptors: folds the MAC collision counters
 * into the interface stats, frees the mbufs and DMA maps of frames the
 * chip has finished with, and re-arms or clears the watchdog timer.
 * Always returns 1 (interrupt claimed).
 */
int
gem_tint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h1;
	struct gem_txsoft *txs;
	int txlast;
	int progress = 0;
	uint32_t v;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	DPRINTF(sc, ("%s: gem_tint\n", device_xname(sc->sc_dev)));

	/*
	 * Unload collision counters ...  Excess and late collisions
	 * count both as collisions and as output errors.
	 */
	v = bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
	if_statadd_ref(ifp, nsr, if_collisions, v +
	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT));
	if_statadd_ref(ifp, nsr, if_oerrors, v);

	/* ... then clear the hardware counters. */
	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * In theory, we could harvest some descriptors before
		 * the ring is empty, but that's a bit complicated.
		 *
		 * GEM_TX_COMPLETION points to the last descriptor
		 * processed +1.
		 *
		 * Let's assume that the NIC writes back to the Tx
		 * descriptors before it updates the completion
		 * register.  If the NIC has posted writes to the
		 * Tx descriptors, PCI ordering requires that the
		 * posted writes flush to RAM before the register-read
		 * finishes.  So let's read the completion register,
		 * before syncing the descriptors, so that we
		 * examine Tx descriptors that are at least as
		 * current as the completion register.
		 */
		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
		DPRINTF(sc,
		    ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
		    txs->txs_lastdesc, txlast));
		/*
		 * Stop if the completion pointer is still inside this
		 * frame's descriptor range; the second arm handles a
		 * range that wraps around the end of the ring.
		 */
		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
			if (txlast >= txs->txs_firstdesc &&
			    txlast <= txs->txs_lastdesc)
				break;
		} else if (txlast >= txs->txs_firstdesc ||
		    txlast <= txs->txs_lastdesc)
			break;

		GEM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndescs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef GEM_DEBUG	/* XXX DMA synchronization? */
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    txsoft %p transmit chain:\n", txs);
			gem_txsoft_print(sc, txs->txs_firstdesc,
			    txs->txs_lastdesc);
		}
#endif


		DPRINTF(sc, ("gem_tint: releasing a desc\n"));
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_ndescs;

		bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		if_statinc_ref(ifp, nsr, if_opackets);
		progress = 1;
	}

	IF_STAT_PUTREF(ifp);

#if 0
	DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
	    "GEM_TX_DATA_PTR %" PRIx64 "GEM_TX_COMPLETION %" PRIx32 "\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_STATE_MACHINE),
	    ((uint64_t)bus_space_read_4(sc->sc_bustag, sc->sc_h1,
	        GEM_TX_DATA_PTR_HI) << 32) |
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1,
	        GEM_TX_DATA_PTR_LO),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_TX_COMPLETION)));
#endif

	if (progress) {
		/* If the ring is now completely empty, reset the IC window. */
		if (sc->sc_txfree == GEM_NTXDESC - 1)
			sc->sc_txwin = 0;

		/* Keep the watchdog armed only while frames are pending. */
		ifp->if_timer = SIMPLEQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
		if_schedule_deferred_start(ifp);
	}
	DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
	    device_xname(sc->sc_dev), ifp->if_timer));

	return (1);
}
/*
 * Receive interrupt.
 *
 * Walks the Rx ring from sc_rxptr up to the hardware completion index,
 * passing good frames up the stack (with optional hardware TCPv4
 * checksum offload fixup), recycling buffers on error, and finally
 * folding the MAC Rx error counters into the interface stats.
 * Always returns 1 (interrupt claimed).
 */
int
gem_rint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	uint64_t rxstat;
	uint32_t rxcomp;
	int i, len, progress = 0;

	DPRINTF(sc, ("%s: gem_rint\n", device_xname(sc->sc_dev)));

	/*
	 * Ignore spurious interrupt that sometimes occurs before
	 * we are set up when we network boot.
	 */
	if (!sc->sc_meminited)
		return 1;

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);

	/*
	 * XXX Read the lastrx only once at the top for speed.
	 */
	DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, rxcomp));

	/*
	 * Go into the loop at least once.
	 */
	for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		GEM_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
			/* Hand the descriptor back to the chip. */
			GEM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		progress++;

		if (rxstat & GEM_RD_BAD_CRC) {
			if_statinc(ifp, if_ierrors);
			DPRINTF(sc, ("%s: receive error: CRC error\n",
			    device_xname(sc->sc_dev)));
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
			    GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/* No errors; receive the packet. */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
			if_statinc(ifp, if_ierrors);
			aprint_error_dev(sc->sc_dev,
			    "receive error: RX no buffer space\n");
			GEM_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

#ifdef INET
		/* hardware checksum */
		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
			struct ether_header *eh;
			struct ip *ip;
			int32_t hlen, pktlen;

			/* Skip over an 802.1Q encapsulation header if any. */
			if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
				pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
				    ETHER_VLAN_ENCAP_LEN;
				eh = (struct ether_header *) (mtod(m, char *) +
				    ETHER_VLAN_ENCAP_LEN);
			} else {
				pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
				eh = mtod(m, struct ether_header *);
			}
			if (ntohs(eh->ether_type) != ETHERTYPE_IP)
				goto swcsum;
			ip = (struct ip *) ((char *)eh + ETHER_HDR_LEN);

			/* IPv4 only */
			if (ip->ip_v != IPVERSION)
				goto swcsum;

			hlen = ip->ip_hl << 2;
			if (hlen < sizeof(struct ip))
				goto swcsum;

			/*
			 * bail if too short, has random trailing garbage,
			 * truncated, fragment, or has ethernet pad.
			 */
			if ((ntohs(ip->ip_len) < hlen) ||
			    (ntohs(ip->ip_len) != pktlen) ||
			    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
				goto swcsum;

			switch (ip->ip_p) {
			case IPPROTO_TCP:
				if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
					goto swcsum;
				if (pktlen < (hlen + sizeof(struct tcphdr)))
					goto swcsum;
				m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
				break;
			case IPPROTO_UDP:
				/* FALLTHROUGH */
			default:
				goto swcsum;
			}

			/* the uncomplemented sum is expected */
			m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;

			/* if the pkt had ip options, we have to deduct them */
			if (hlen > sizeof(struct ip)) {
				uint16_t *opts;
				uint32_t optsum, temp;

				optsum = 0;
				temp = hlen - sizeof(struct ip);
				opts = (uint16_t *) ((char *) ip +
				    sizeof(struct ip));

				/* One's-complement sum of the option words. */
				while (temp > 1) {
					optsum += ntohs(*opts++);
					temp -= 2;
				}
				while (optsum >> 16)
					optsum = (optsum >> 16) +
					    (optsum & 0xffff);

				/* Deduct ip opts sum from hwsum. */
				m->m_pkthdr.csum_data += (uint16_t)~optsum;

				while (m->m_pkthdr.csum_data >> 16)
					m->m_pkthdr.csum_data =
					    (m->m_pkthdr.csum_data >> 16) +
					    (m->m_pkthdr.csum_data &
					    0xffff);
			}

			m->m_pkthdr.csum_flags |= M_CSUM_DATA |
			    M_CSUM_NO_PSEUDOHDR;
		} else
swcsum:
			m->m_pkthdr.csum_flags = 0;
#endif
		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	if (progress) {
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			GEM_COUNTER_INCR(sc, sc_ev_rxfull);
#ifdef GEM_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rint: ring wrap\n",
				    device_xname(sc->sc_dev));
#endif
		}
		sc->sc_rxptr = i;
		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
	}
#ifdef GEM_COUNTERS
	/* Histogram of frames processed per interrupt. */
	if (progress <= 4) {
		GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
	} else if (progress < 32) {
		if (progress < 16)
			GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
		else
			GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);

	} else {
		if (progress < 64)
			GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
		else
			GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
	}
#endif

	DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));

	/* Read error counters ... */
	if_statadd(ifp, if_ierrors,
	    bus_space_read_4(t, h, GEM_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, h, GEM_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, h, GEM_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, h, GEM_MAC_RX_CODE_VIOL));

	/* ... then clear the hardware counters. */
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	return (1);
}
/*
 * gem_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a fresh mbuf cluster, replaces any buffer previously
 *	loaded at ring slot `idx', loads it into the slot's DMA map and
 *	re-initializes the hardware descriptor.  Returns 0 on success or
 *	ENOBUFS if mbuf/cluster allocation fails (the old buffer, if any,
 *	is left untouched in that case).
 */
int
gem_add_rxbuf(struct gem_softc *sc, int idx)
{
	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

#ifdef GEM_DEBUG
	/* bzero the packet to check DMA */
	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
#endif

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "can't load rx DMA map %d, error = %d\n", idx, error);
		panic("gem_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	GEM_INIT_RXDESC(sc, idx);

	return (0);
}
/*
 * Error interrupt handler.
 *
 * Handles the MIF, Rx-tag-error and bus-error conditions signalled in
 * `status'; anything else is just logged with the interrupt bits
 * decoded.  Always returns 1 (interrupt claimed).
 */
int
gem_eint(struct gem_softc *sc, u_int status)
{
	char bits[128];
	uint32_t r, v;

	if ((status & GEM_INTR_MIF) != 0) {
		printf("%s: XXXlink status changed\n", device_xname(sc->sc_dev));
		return (1);
	}

	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
		gem_reset_rxdma(sc);
		return (1);
	}

	if (status & GEM_INTR_BERR) {
		if (sc->sc_flags & GEM_PCI)
			r = GEM_ERROR_STATUS;
		else
			r = GEM_SBUS_ERROR_STATUS;
		/*
		 * The error status register is read twice; the first
		 * read is discarded.  NOTE(review): presumably the
		 * register is latched and needs the double read to
		 * return the current value -- confirm against the
		 * GEM ASIC specification.
		 */
		bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
		v = bus_space_read_4(sc->sc_bustag, sc->sc_h2, r);
		aprint_error_dev(sc->sc_dev, "bus error interrupt: 0x%02x\n",
		    v);
		return (1);
	}
	snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);

	return (1);
}
/*
 * PCS interrupts.
 * We should receive these when the link status changes, but sometimes
 * we don't receive them for link up.  We compensate for this in the
 * gem_tick() callout.
 *
 * Only meaningful in SERDES/serial mode: reads the PCS MII status,
 * waits out remote-fault / autonegotiation transients, then records
 * the new link state (speed/duplex) and calls gem_statuschange().
 * Always returns 1 (interrupt claimed).
 */
int
gem_pint(struct gem_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	uint32_t v, v2;

	/*
	 * Clear the PCS interrupt from GEM_STATUS.  The PCS register is
	 * latched, so we have to read it twice.  There is only one bit in
	 * use, so the value is meaningless.
	 */
	bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);
	bus_space_read_4(t, h, GEM_MII_INTERRUP_STATUS);

	if ((ifp->if_flags & IFF_UP) == 0)
		return 1;

	if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) == 0)
		return 1;

	v = bus_space_read_4(t, h, GEM_MII_STATUS);
	/* If we see remote fault, our link partner is probably going away */
	if ((v & GEM_MII_STATUS_REM_FLT) != 0) {
		gem_bitwait(sc, h, GEM_MII_STATUS, GEM_MII_STATUS_REM_FLT, 0);
		v = bus_space_read_4(t, h, GEM_MII_STATUS);
	/* Otherwise, we may need to wait after auto-negotiation completes */
	} else if ((v & (GEM_MII_STATUS_LINK_STS | GEM_MII_STATUS_ANEG_CPT)) ==
	    GEM_MII_STATUS_ANEG_CPT) {
		gem_bitwait(sc, h, GEM_MII_STATUS, 0, GEM_MII_STATUS_LINK_STS);
		v = bus_space_read_4(t, h, GEM_MII_STATUS);
	}
	if ((v & GEM_MII_STATUS_LINK_STS) != 0) {
		/* Link came up (or was already up). */
		if (sc->sc_flags & GEM_LINK) {
			return 1;
		}
		callout_stop(&sc->sc_tick_ch);
		v = bus_space_read_4(t, h, GEM_MII_ANAR);
		v2 = bus_space_read_4(t, h, GEM_MII_ANLPAR);
		sc->sc_mii.mii_media_active = IFM_ETHER | IFM_1000_SX;
		sc->sc_mii.mii_media_status = IFM_AVALID | IFM_ACTIVE;
		/* Duplex is the intersection of our and the partner's ANs. */
		v &= v2;
		if (v & GEM_MII_ANEG_FUL_DUPLX) {
			sc->sc_mii.mii_media_active |= IFM_FDX;
#ifdef GEM_DEBUG
			aprint_debug_dev(sc->sc_dev, "link up: full duplex\n");
#endif
		} else if (v & GEM_MII_ANEG_HLF_DUPLX) {
			sc->sc_mii.mii_media_active |= IFM_HDX;
#ifdef GEM_DEBUG
			aprint_debug_dev(sc->sc_dev, "link up: half duplex\n");
#endif
		} else {
#ifdef GEM_DEBUG
			aprint_debug_dev(sc->sc_dev, "duplex mismatch\n");
#endif
		}
		gem_statuschange(sc);
	} else {
		/* Link went down (or was already down). */
		if ((sc->sc_flags & GEM_LINK) == 0) {
			return 1;
		}
		sc->sc_mii.mii_media_active = IFM_ETHER | IFM_NONE;
		sc->sc_mii.mii_media_status = IFM_AVALID;
#ifdef GEM_DEBUG
		aprint_debug_dev(sc->sc_dev, "link down\n");
#endif
		gem_statuschange(sc);

		/* Start the 10 second timer */
		callout_schedule(&sc->sc_tick_ch, hz * 10);
	}
	return 1;
}
/*
 * Main interrupt handler (the `void *' argument is the softc).
 *
 * Reads GEM_STATUS once and dispatches to the error (gem_eint), Tx
 * (gem_tint), Rx (gem_rint) and PCS (gem_pint) handlers, handles
 * TX/RX MAC fault conditions, and feeds the status word to the
 * entropy pool.  Returns non-zero if any sub-handler claimed the
 * interrupt.
 */
int
gem_intr(void *v)
{
	struct gem_softc *sc = v;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	uint32_t status;
	int r = 0;
#ifdef GEM_DEBUG
	char bits[128];
#endif

	/* XXX We should probably mask out interrupts until we're done */

	sc->sc_ev_intr.ev_count++;

	status = bus_space_read_4(t, h, GEM_STATUS);
#ifdef GEM_DEBUG
	snprintb(bits, sizeof(bits), GEM_INTR_BITS, status);
#endif
	DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
	    device_xname(sc->sc_dev), (status >> 19), bits));

	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		r |= gem_eint(sc, status);

	/* We don't bother with GEM_INTR_TX_DONE */
	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
		GEM_COUNTER_INCR(sc, sc_ev_txint);
		r |= gem_tint(sc);
	}

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
		GEM_COUNTER_INCR(sc, sc_ev_rxint);
		r |= gem_rint(sc);
	}

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, h, GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    device_xname(sc->sc_dev), txstat);
		/* Underrun / oversize frames require a full reinit. */
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init(ifp);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, h, GEM_MAC_RX_STATUS);
		/*
		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
		 * revisions GEM_MAC_RX_OVERFLOW happen often due to a
		 * silicon bug so handle them silently.  So if we detect
		 * an RX FIFO overflow, we fire off a timer, and check
		 * whether we're still making progress by looking at the
		 * RX FIFO write and read pointers.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW) {
			if_statinc(ifp, if_ierrors);
			GEM_COUNTER_INCR(sc, sc_ev_rxoverflow);
#ifdef GEM_DEBUG
			aprint_error_dev(sc->sc_dev,
			    "receive error: RX overflow sc->rxptr %d, complete %d\n", sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
#endif
			/* Snapshot the FIFO pointers for the watchdog. */
			sc->sc_rx_fifo_wr_ptr =
			    bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
			sc->sc_rx_fifo_rd_ptr =
			    bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
			callout_schedule(&sc->sc_rx_watchdog, 400);
		} else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status 0x%02x\n",
			    device_xname(sc->sc_dev), rxstat);
	}
	if (status & GEM_INTR_PCS) {
		r |= gem_pint(sc);
	}

	/* Do we need to do anything with these?
	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
		if ((status2 & GEM_MAC_PAUSED) != 0)
			aprintf_debug_dev(sc->sc_dev, "PAUSE received (%d slots)\n",
			    GEM_MAC_PAUSE_TIME(status2));
		if ((status2 & GEM_MAC_PAUSE) != 0)
			aprintf_debug_dev(sc->sc_dev, "transited to PAUSE state\n");
		if ((status2 & GEM_MAC_RESUME) != 0)
			aprintf_debug_dev(sc->sc_dev, "transited to non-PAUSE state\n");
	}
	if ((status & GEM_INTR_MIF) != 0)
		aprintf_debug_dev(sc->sc_dev, "MIF interrupt\n");
	*/
	rnd_add_uint32(&sc->rnd_source, status);
	return (r);
}
/*
 * Rx overflow watchdog callout (armed by gem_intr() on
 * GEM_MAC_RX_OVERFLOW).
 *
 * Compares the current RX FIFO write/read pointers and MAC state
 * against the snapshot taken at overflow time; if the receiver is
 * still stuck in overflow with the FIFO pointers unmoved, it resets
 * the chip via gem_init().
 */
void
gem_rx_watchdog(void *arg)
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h1;
	uint32_t rx_fifo_wr_ptr;
	uint32_t rx_fifo_rd_ptr;
	uint32_t state;

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		aprint_error_dev(sc->sc_dev, "receiver not running\n");
		return;
	}

	rx_fifo_wr_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_WR_PTR);
	rx_fifo_rd_ptr = bus_space_read_4(t, h, GEM_RX_FIFO_RD_PTR);
	state = bus_space_read_4(t, h, GEM_MAC_MAC_STATE);
	if ((state & GEM_MAC_STATE_OVERFLOW) == GEM_MAC_STATE_OVERFLOW &&
	    ((rx_fifo_wr_ptr == rx_fifo_rd_ptr) ||
	     ((sc->sc_rx_fifo_wr_ptr == rx_fifo_wr_ptr) &&
	      (sc->sc_rx_fifo_rd_ptr == rx_fifo_rd_ptr))))
	{
		/*
		 * The RX state machine is still in overflow state and
		 * the RX FIFO write and read pointers seem to be
		 * stuck.  Whack the chip over the head to get things
		 * going again.
		 */
		aprint_error_dev(sc->sc_dev,
		    "receiver stuck in overflow, resetting\n");
		gem_init(ifp);
	} else {
		/*
		 * Not obviously stuck; log (under GEM_DEBUG) which
		 * condition cleared, and only reset if none of the
		 * "making progress" checks fired.
		 */
		int needreset = 1;
		if ((state & GEM_MAC_STATE_OVERFLOW) != GEM_MAC_STATE_OVERFLOW) {
			DPRINTF(sc,
			    ("%s: rx_watchdog: not in overflow state: 0x%x\n",
			    device_xname(sc->sc_dev), state));
		}
		if (rx_fifo_wr_ptr != rx_fifo_rd_ptr) {
			DPRINTF(sc,
			    ("%s: rx_watchdog: wr & rd ptr different\n",
			    device_xname(sc->sc_dev)));
			needreset = 0;
		}
		if (sc->sc_rx_fifo_wr_ptr != rx_fifo_wr_ptr) {
			DPRINTF(sc, ("%s: rx_watchdog: wr pointer != saved\n",
			    device_xname(sc->sc_dev)));
			needreset = 0;
		}
		if (sc->sc_rx_fifo_rd_ptr != rx_fifo_rd_ptr) {
			DPRINTF(sc, ("%s: rx_watchdog: rd pointer != saved\n",
			    device_xname(sc->sc_dev)));
			needreset = 0;
		}
		if (needreset) {
			aprint_error_dev(sc->sc_dev,
			    "rx_watchdog: resetting anyway\n");
			gem_init(ifp);
		}
	}
}
/*
 * Interface watchdog, fired when ifp->if_timer expires with frames
 * still pending: logs a device timeout, counts an output error, then
 * resets the chip and restarts transmission.
 */
void
gem_watchdog(struct ifnet *ifp)
{
	struct gem_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
	    "GEM_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_RX_CONFIG),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_bustag, sc->sc_h1, GEM_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	/* Try to get more packets going. */
	gem_init(ifp);
	gem_start(ifp);
}
/*
 * Initialize the MII Management Interface
 *
 * Puts the MIF into frame mode (clears the bit-bang enable bit) and
 * caches the resulting config in sc_mif_config for later use.
 */
void
gem_mifinit(struct gem_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_h1;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
}
2363 1.1 eeh * 2364 1.1 eeh */ 2365 1.1 eeh static int 2366 1.113 msaitoh gem_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 2367 1.1 eeh { 2368 1.85 dyoung struct gem_softc *sc = device_private(self); 2369 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 2370 1.50 martin bus_space_handle_t mif = sc->sc_h1; 2371 1.1 eeh int n; 2372 1.112 msaitoh uint32_t v; 2373 1.1 eeh 2374 1.1 eeh #ifdef GEM_DEBUG1 2375 1.1 eeh if (sc->sc_debug) 2376 1.68 jdc printf("gem_mii_readreg: PHY %d reg %d\n", phy, reg); 2377 1.1 eeh #endif 2378 1.1 eeh 2379 1.1 eeh /* Construct the frame command */ 2380 1.1 eeh v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | 2381 1.1 eeh GEM_MIF_FRAME_READ; 2382 1.1 eeh 2383 1.1 eeh bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 2384 1.1 eeh for (n = 0; n < 100; n++) { 2385 1.1 eeh DELAY(1); 2386 1.1 eeh v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 2387 1.113 msaitoh if (v & GEM_MIF_FRAME_TA0) { 2388 1.113 msaitoh *val = v & GEM_MIF_FRAME_DATA; 2389 1.113 msaitoh return 0; 2390 1.113 msaitoh } 2391 1.1 eeh } 2392 1.1 eeh 2393 1.85 dyoung printf("%s: mii_read timeout\n", device_xname(sc->sc_dev)); 2394 1.113 msaitoh return ETIMEDOUT; 2395 1.1 eeh } 2396 1.1 eeh 2397 1.113 msaitoh static int 2398 1.113 msaitoh gem_mii_writereg(device_t self, int phy, int reg, uint16_t val) 2399 1.1 eeh { 2400 1.85 dyoung struct gem_softc *sc = device_private(self); 2401 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 2402 1.50 martin bus_space_handle_t mif = sc->sc_h1; 2403 1.1 eeh int n; 2404 1.112 msaitoh uint32_t v; 2405 1.1 eeh 2406 1.1 eeh #ifdef GEM_DEBUG1 2407 1.1 eeh if (sc->sc_debug) 2408 1.68 jdc printf("gem_mii_writereg: PHY %d reg %d val %x\n", 2409 1.1 eeh phy, reg, val); 2410 1.1 eeh #endif 2411 1.1 eeh 2412 1.1 eeh /* Construct the frame command */ 2413 1.1 eeh v = GEM_MIF_FRAME_WRITE | 2414 1.1 eeh (phy << GEM_MIF_PHY_SHIFT) | 2415 1.1 eeh (reg << GEM_MIF_REG_SHIFT) | 2416 1.1 eeh (val & GEM_MIF_FRAME_DATA); 2417 1.1 eeh 2418 1.1 eeh 
bus_space_write_4(t, mif, GEM_MIF_FRAME, v); 2419 1.1 eeh for (n = 0; n < 100; n++) { 2420 1.1 eeh DELAY(1); 2421 1.1 eeh v = bus_space_read_4(t, mif, GEM_MIF_FRAME); 2422 1.1 eeh if (v & GEM_MIF_FRAME_TA0) 2423 1.113 msaitoh return 0; 2424 1.1 eeh } 2425 1.1 eeh 2426 1.85 dyoung printf("%s: mii_write timeout\n", device_xname(sc->sc_dev)); 2427 1.113 msaitoh return ETIMEDOUT; 2428 1.1 eeh } 2429 1.1 eeh 2430 1.1 eeh static void 2431 1.100 matt gem_mii_statchg(struct ifnet *ifp) 2432 1.1 eeh { 2433 1.100 matt struct gem_softc *sc = ifp->if_softc; 2434 1.3 eeh #ifdef GEM_DEBUG 2435 1.1 eeh int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 2436 1.3 eeh #endif 2437 1.1 eeh 2438 1.1 eeh #ifdef GEM_DEBUG 2439 1.1 eeh if (sc->sc_debug) 2440 1.31 heas printf("gem_mii_statchg: status change: phy = %d\n", 2441 1.28 chs sc->sc_phys[instance]); 2442 1.1 eeh #endif 2443 1.68 jdc gem_statuschange(sc); 2444 1.68 jdc } 2445 1.1 eeh 2446 1.68 jdc /* 2447 1.68 jdc * Common status change for gem_mii_statchg() and gem_pint() 2448 1.68 jdc */ 2449 1.68 jdc void 2450 1.68 jdc gem_statuschange(struct gem_softc* sc) 2451 1.68 jdc { 2452 1.68 jdc struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2453 1.68 jdc bus_space_tag_t t = sc->sc_bustag; 2454 1.68 jdc bus_space_handle_t mac = sc->sc_h1; 2455 1.68 jdc int gigabit; 2456 1.112 msaitoh uint32_t rxcfg, txcfg, v; 2457 1.68 jdc 2458 1.68 jdc if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0 && 2459 1.68 jdc IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) 2460 1.68 jdc sc->sc_flags |= GEM_LINK; 2461 1.68 jdc else 2462 1.68 jdc sc->sc_flags &= ~GEM_LINK; 2463 1.68 jdc 2464 1.70 jdc if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000)) 2465 1.68 jdc gigabit = 1; 2466 1.70 jdc else 2467 1.68 jdc gigabit = 0; 2468 1.1 eeh 2469 1.68 jdc /* 2470 1.68 jdc * The configuration done here corresponds to the steps F) and 2471 1.68 jdc * G) and as far as enabling of RX and TX MAC goes also step H) 2472 1.68 jdc * of the initialization 
sequence outlined in section 3.2.1 of 2473 1.68 jdc * the GEM Gigabit Ethernet ASIC Specification. 2474 1.68 jdc */ 2475 1.68 jdc 2476 1.68 jdc rxcfg = bus_space_read_4(t, mac, GEM_MAC_RX_CONFIG); 2477 1.68 jdc rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE); 2478 1.68 jdc txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 2479 1.68 jdc if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 2480 1.68 jdc txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 2481 1.68 jdc else if (gigabit) { 2482 1.68 jdc rxcfg |= GEM_MAC_RX_CARR_EXTEND; 2483 1.68 jdc txcfg |= GEM_MAC_RX_CARR_EXTEND; 2484 1.68 jdc } 2485 1.1 eeh bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); 2486 1.68 jdc bus_space_barrier(t, mac, GEM_MAC_TX_CONFIG, 4, 2487 1.68 jdc BUS_SPACE_BARRIER_WRITE); 2488 1.68 jdc if (!gem_bitwait(sc, mac, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 2489 1.85 dyoung aprint_normal_dev(sc->sc_dev, "cannot disable TX MAC\n"); 2490 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, txcfg); 2491 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 0); 2492 1.68 jdc bus_space_barrier(t, mac, GEM_MAC_RX_CONFIG, 4, 2493 1.68 jdc BUS_SPACE_BARRIER_WRITE); 2494 1.68 jdc if (!gem_bitwait(sc, mac, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 2495 1.85 dyoung aprint_normal_dev(sc->sc_dev, "cannot disable RX MAC\n"); 2496 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, rxcfg); 2497 1.68 jdc 2498 1.68 jdc v = bus_space_read_4(t, mac, GEM_MAC_CONTROL_CONFIG) & 2499 1.68 jdc ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 2500 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_CONTROL_CONFIG, v); 2501 1.68 jdc 2502 1.68 jdc if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) == 0 && 2503 1.68 jdc gigabit != 0) 2504 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME, 2505 1.68 jdc GEM_MAC_SLOT_TIME_CARR_EXTEND); 2506 1.68 jdc else 2507 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_SLOT_TIME, 2508 1.68 jdc GEM_MAC_SLOT_TIME_NORMAL); 2509 1.1 eeh 2510 1.1 
eeh /* XIF Configuration */ 2511 1.68 jdc if (sc->sc_flags & GEM_LINK) 2512 1.68 jdc v = GEM_MAC_XIF_LINK_LED; 2513 1.68 jdc else 2514 1.68 jdc v = 0; 2515 1.1 eeh v |= GEM_MAC_XIF_TX_MII_ENA; 2516 1.70 jdc 2517 1.70 jdc /* If an external transceiver is connected, enable its MII drivers */ 2518 1.70 jdc sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); 2519 1.70 jdc if ((sc->sc_flags &(GEM_SERDES | GEM_SERIAL)) == 0) { 2520 1.70 jdc if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { 2521 1.70 jdc if (gigabit) 2522 1.70 jdc v |= GEM_MAC_XIF_GMII_MODE; 2523 1.70 jdc else 2524 1.70 jdc v &= ~GEM_MAC_XIF_GMII_MODE; 2525 1.70 jdc } else 2526 1.70 jdc /* Internal MII needs buf enable */ 2527 1.70 jdc v |= GEM_MAC_XIF_MII_BUF_ENA; 2528 1.97 jdc /* MII needs echo disable if half duplex. */ 2529 1.97 jdc if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 2530 1.97 jdc /* turn on full duplex LED */ 2531 1.97 jdc v |= GEM_MAC_XIF_FDPLX_LED; 2532 1.97 jdc else 2533 1.97 jdc /* half duplex -- disable echo */ 2534 1.97 jdc v |= GEM_MAC_XIF_ECHO_DISABL; 2535 1.68 jdc } else { 2536 1.70 jdc if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 2537 1.70 jdc v |= GEM_MAC_XIF_FDPLX_LED; 2538 1.70 jdc v |= GEM_MAC_XIF_GMII_MODE; 2539 1.68 jdc } 2540 1.70 jdc bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); 2541 1.70 jdc 2542 1.68 jdc if ((ifp->if_flags & IFF_RUNNING) != 0 && 2543 1.68 jdc (sc->sc_flags & GEM_LINK) != 0) { 2544 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 2545 1.68 jdc txcfg | GEM_MAC_TX_ENABLE); 2546 1.68 jdc bus_space_write_4(t, mac, GEM_MAC_RX_CONFIG, 2547 1.68 jdc rxcfg | GEM_MAC_RX_ENABLE); 2548 1.68 jdc } 2549 1.1 eeh } 2550 1.1 eeh 2551 1.1 eeh int 2552 1.69 dyoung gem_ser_mediachange(struct ifnet *ifp) 2553 1.1 eeh { 2554 1.1 eeh struct gem_softc *sc = ifp->if_softc; 2555 1.68 jdc u_int s, t; 2556 1.1 eeh 2557 1.69 dyoung if (IFM_TYPE(sc->sc_mii.mii_media.ifm_media) != IFM_ETHER) 2558 1.68 jdc return EINVAL; 2559 
1.1 eeh 2560 1.69 dyoung s = IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media); 2561 1.69 dyoung if (s == IFM_AUTO) { 2562 1.69 dyoung if (sc->sc_mii_media != s) { 2563 1.69 dyoung #ifdef GEM_DEBUG 2564 1.85 dyoung aprint_debug_dev(sc->sc_dev, "setting media to auto\n"); 2565 1.69 dyoung #endif 2566 1.69 dyoung sc->sc_mii_media = s; 2567 1.69 dyoung if (ifp->if_flags & IFF_UP) { 2568 1.69 dyoung gem_pcs_stop(sc, 0); 2569 1.69 dyoung gem_pcs_start(sc); 2570 1.69 dyoung } 2571 1.69 dyoung } 2572 1.69 dyoung return 0; 2573 1.69 dyoung } 2574 1.69 dyoung if (s == IFM_1000_SX) { 2575 1.116 msaitoh t = IFM_OPTIONS(sc->sc_mii.mii_media.ifm_media) 2576 1.116 msaitoh & (IFM_FDX | IFM_HDX); 2577 1.116 msaitoh if ((sc->sc_mii_media & (IFM_FDX | IFM_HDX)) != t) { 2578 1.116 msaitoh sc->sc_mii_media &= ~(IFM_FDX | IFM_HDX); 2579 1.116 msaitoh sc->sc_mii_media |= t; 2580 1.116 msaitoh #ifdef GEM_DEBUG 2581 1.116 msaitoh aprint_debug_dev(sc->sc_dev, 2582 1.116 msaitoh "setting media to 1000baseSX-%s\n", 2583 1.116 msaitoh t == IFM_FDX ? 
"FDX" : "HDX"); 2584 1.68 jdc #endif 2585 1.116 msaitoh if (ifp->if_flags & IFF_UP) { 2586 1.116 msaitoh gem_pcs_stop(sc, 0); 2587 1.116 msaitoh gem_pcs_start(sc); 2588 1.68 jdc } 2589 1.68 jdc } 2590 1.116 msaitoh return 0; 2591 1.69 dyoung } 2592 1.69 dyoung return EINVAL; 2593 1.1 eeh } 2594 1.1 eeh 2595 1.1 eeh void 2596 1.69 dyoung gem_ser_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2597 1.1 eeh { 2598 1.1 eeh struct gem_softc *sc = ifp->if_softc; 2599 1.1 eeh 2600 1.1 eeh if ((ifp->if_flags & IFF_UP) == 0) 2601 1.1 eeh return; 2602 1.1 eeh ifmr->ifm_active = sc->sc_mii.mii_media_active; 2603 1.1 eeh ifmr->ifm_status = sc->sc_mii.mii_media_status; 2604 1.1 eeh } 2605 1.1 eeh 2606 1.79 dyoung static int 2607 1.79 dyoung gem_ifflags_cb(struct ethercom *ec) 2608 1.79 dyoung { 2609 1.79 dyoung struct ifnet *ifp = &ec->ec_if; 2610 1.79 dyoung struct gem_softc *sc = ifp->if_softc; 2611 1.121 msaitoh u_short change = ifp->if_flags ^ sc->sc_if_flags; 2612 1.79 dyoung 2613 1.117 msaitoh if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) 2614 1.79 dyoung return ENETRESET; 2615 1.79 dyoung else if ((change & IFF_PROMISC) != 0) 2616 1.79 dyoung gem_setladrf(sc); 2617 1.79 dyoung return 0; 2618 1.79 dyoung } 2619 1.79 dyoung 2620 1.1 eeh /* 2621 1.1 eeh * Process an ioctl request. 2622 1.1 eeh */ 2623 1.1 eeh int 2624 1.79 dyoung gem_ioctl(struct ifnet *ifp, unsigned long cmd, void *data) 2625 1.1 eeh { 2626 1.1 eeh struct gem_softc *sc = ifp->if_softc; 2627 1.1 eeh int s, error = 0; 2628 1.1 eeh 2629 1.20 matt s = splnet(); 2630 1.1 eeh 2631 1.79 dyoung if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 2632 1.74 dyoung error = 0; 2633 1.74 dyoung if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 2634 1.74 dyoung ; 2635 1.74 dyoung else if (ifp->if_flags & IFF_RUNNING) { 2636 1.1 eeh /* 2637 1.1 eeh * Multicast list has changed; set the hardware filter 2638 1.1 eeh * accordingly. 
2639 1.1 eeh */ 2640 1.74 dyoung gem_setladrf(sc); 2641 1.1 eeh } 2642 1.1 eeh } 2643 1.1 eeh 2644 1.1 eeh /* Try to get things going again */ 2645 1.43 christos if (ifp->if_flags & IFF_UP) 2646 1.1 eeh gem_start(ifp); 2647 1.1 eeh splx(s); 2648 1.1 eeh return (error); 2649 1.1 eeh } 2650 1.1 eeh 2651 1.85 dyoung static void 2652 1.85 dyoung gem_inten(struct gem_softc *sc) 2653 1.85 dyoung { 2654 1.85 dyoung bus_space_tag_t t = sc->sc_bustag; 2655 1.85 dyoung bus_space_handle_t h = sc->sc_h1; 2656 1.85 dyoung uint32_t v; 2657 1.85 dyoung 2658 1.85 dyoung if ((sc->sc_flags & (GEM_SERDES | GEM_SERIAL)) != 0) 2659 1.85 dyoung v = GEM_INTR_PCS; 2660 1.85 dyoung else 2661 1.85 dyoung v = GEM_INTR_MIF; 2662 1.85 dyoung bus_space_write_4(t, h, GEM_INTMASK, 2663 1.85 dyoung ~(GEM_INTR_TX_INTME | 2664 1.85 dyoung GEM_INTR_TX_EMPTY | 2665 1.85 dyoung GEM_INTR_TX_MAC | 2666 1.117 msaitoh GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF | 2667 1.117 msaitoh GEM_INTR_RX_TAG_ERR | GEM_INTR_MAC_CONTROL | 2668 1.85 dyoung GEM_INTR_BERR | v)); 2669 1.85 dyoung } 2670 1.85 dyoung 2671 1.85 dyoung bool 2672 1.93 dyoung gem_resume(device_t self, const pmf_qual_t *qual) 2673 1.85 dyoung { 2674 1.85 dyoung struct gem_softc *sc = device_private(self); 2675 1.85 dyoung 2676 1.85 dyoung gem_inten(sc); 2677 1.85 dyoung 2678 1.85 dyoung return true; 2679 1.85 dyoung } 2680 1.85 dyoung 2681 1.85 dyoung bool 2682 1.93 dyoung gem_suspend(device_t self, const pmf_qual_t *qual) 2683 1.85 dyoung { 2684 1.85 dyoung struct gem_softc *sc = device_private(self); 2685 1.85 dyoung bus_space_tag_t t = sc->sc_bustag; 2686 1.85 dyoung bus_space_handle_t h = sc->sc_h1; 2687 1.85 dyoung 2688 1.85 dyoung bus_space_write_4(t, h, GEM_INTMASK, ~(uint32_t)0); 2689 1.85 dyoung 2690 1.85 dyoung return true; 2691 1.85 dyoung } 2692 1.1 eeh 2693 1.85 dyoung bool 2694 1.85 dyoung gem_shutdown(device_t self, int howto) 2695 1.1 eeh { 2696 1.85 dyoung struct gem_softc *sc = device_private(self); 2697 1.1 eeh struct ifnet *ifp = 
&sc->sc_ethercom.ec_if; 2698 1.1 eeh 2699 1.1 eeh gem_stop(ifp, 1); 2700 1.85 dyoung 2701 1.85 dyoung return true; 2702 1.1 eeh } 2703 1.1 eeh 2704 1.1 eeh /* 2705 1.1 eeh * Set up the logical address filter. 2706 1.1 eeh */ 2707 1.1 eeh void 2708 1.81 dsl gem_setladrf(struct gem_softc *sc) 2709 1.1 eeh { 2710 1.15 matt struct ethercom *ec = &sc->sc_ethercom; 2711 1.15 matt struct ifnet *ifp = &ec->ec_if; 2712 1.1 eeh struct ether_multi *enm; 2713 1.1 eeh struct ether_multistep step; 2714 1.1 eeh bus_space_tag_t t = sc->sc_bustag; 2715 1.50 martin bus_space_handle_t h = sc->sc_h1; 2716 1.112 msaitoh uint32_t crc; 2717 1.112 msaitoh uint32_t hash[16]; 2718 1.112 msaitoh uint32_t v; 2719 1.15 matt int i; 2720 1.1 eeh 2721 1.1 eeh /* Get current RX configuration */ 2722 1.1 eeh v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); 2723 1.1 eeh 2724 1.15 matt /* 2725 1.15 matt * Turn off promiscuous mode, promiscuous group mode (all multicast), 2726 1.15 matt * and hash filter. Depending on the case, the right bit will be 2727 1.15 matt * enabled. 2728 1.15 matt */ 2729 1.117 msaitoh v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER | 2730 1.15 matt GEM_MAC_RX_PROMISC_GRP); 2731 1.15 matt 2732 1.1 eeh if ((ifp->if_flags & IFF_PROMISC) != 0) { 2733 1.15 matt /* Turn on promiscuous mode */ 2734 1.1 eeh v |= GEM_MAC_RX_PROMISCUOUS; 2735 1.1 eeh ifp->if_flags |= IFF_ALLMULTI; 2736 1.1 eeh goto chipit; 2737 1.1 eeh } 2738 1.1 eeh 2739 1.1 eeh /* 2740 1.1 eeh * Set up multicast address filter by passing all multicast addresses 2741 1.15 matt * through a crc generator, and then using the high order 8 bits as an 2742 1.15 matt * index into the 256 bit logical address filter. The high order 4 2743 1.41 christos * bits selects the word, while the other 4 bits select the bit within 2744 1.15 matt * the word (where bit 0 is the MSB). 
2745 1.1 eeh */ 2746 1.1 eeh 2747 1.15 matt /* Clear hash table */ 2748 1.15 matt memset(hash, 0, sizeof(hash)); 2749 1.15 matt 2750 1.119 msaitoh ETHER_LOCK(ec); 2751 1.1 eeh ETHER_FIRST_MULTI(step, ec, enm); 2752 1.1 eeh while (enm != NULL) { 2753 1.6 thorpej if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2754 1.1 eeh /* 2755 1.1 eeh * We must listen to a range of multicast addresses. 2756 1.1 eeh * For now, just accept all multicasts, rather than 2757 1.1 eeh * trying to set only those filter bits needed to match 2758 1.1 eeh * the range. (At this time, the only use of address 2759 1.1 eeh * ranges is for IP multicast routing, for which the 2760 1.1 eeh * range is big enough to require all bits set.) 2761 1.68 jdc * XXX should use the address filters for this 2762 1.1 eeh */ 2763 1.1 eeh ifp->if_flags |= IFF_ALLMULTI; 2764 1.15 matt v |= GEM_MAC_RX_PROMISC_GRP; 2765 1.119 msaitoh ETHER_UNLOCK(ec); 2766 1.1 eeh goto chipit; 2767 1.1 eeh } 2768 1.1 eeh 2769 1.15 matt /* Get the LE CRC32 of the address */ 2770 1.15 matt crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo)); 2771 1.1 eeh 2772 1.1 eeh /* Just want the 8 most significant bits. */ 2773 1.1 eeh crc >>= 24; 2774 1.1 eeh 2775 1.1 eeh /* Set the corresponding bit in the filter. 
*/ 2776 1.15 matt hash[crc >> 4] |= 1 << (15 - (crc & 15)); 2777 1.1 eeh 2778 1.1 eeh ETHER_NEXT_MULTI(step, enm); 2779 1.1 eeh } 2780 1.119 msaitoh ETHER_UNLOCK(ec); 2781 1.1 eeh 2782 1.15 matt v |= GEM_MAC_RX_HASH_FILTER; 2783 1.1 eeh ifp->if_flags &= ~IFF_ALLMULTI; 2784 1.1 eeh 2785 1.15 matt /* Now load the hash table into the chip (if we are using it) */ 2786 1.15 matt for (i = 0; i < 16; i++) { 2787 1.15 matt bus_space_write_4(t, h, 2788 1.15 matt GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), 2789 1.15 matt hash[i]); 2790 1.15 matt } 2791 1.15 matt 2792 1.1 eeh chipit: 2793 1.41 christos sc->sc_if_flags = ifp->if_flags; 2794 1.1 eeh bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); 2795 1.1 eeh } 2796