/* $NetBSD: if_ale.c,v 1.44 2024/06/29 12:11:11 riastradh Exp $ */

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/ale/if_ale.c,v 1.3 2008/12/03 09:01:12 yongari Exp $
 */

/* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_ale.c,v 1.44 2024/06/29 12:11:11 riastradh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alereg.h>

static int	ale_match(device_t, cfdata_t, void *);
static void	ale_attach(device_t, device_t, void *);
static int	ale_detach(device_t, int);

static int	ale_miibus_readreg(device_t, int, int, uint16_t *);
static int	ale_miibus_writereg(device_t, int, int, uint16_t);
static void	ale_miibus_statchg(struct ifnet *);

static int	ale_init(struct ifnet *);
static void	ale_start(struct ifnet *);
static int	ale_ioctl(struct ifnet *, u_long, void *);
static void	ale_watchdog(struct ifnet *);
static int	ale_mediachange(struct ifnet *);
static void	ale_mediastatus(struct ifnet *, struct ifmediareq *);

static int	ale_intr(void *);
static int	ale_rxeof(struct ale_softc *sc);
static void	ale_rx_update_page(struct ale_softc *, struct ale_rx_page **,
		    uint32_t, uint32_t *);
static void	ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t);
static void	ale_txeof(struct ale_softc *);

static int	ale_dma_alloc(struct ale_softc *);
static void	ale_dma_free(struct ale_softc *);
static int	ale_encap(struct ale_softc *, struct mbuf *);
static void	ale_init_rx_pages(struct ale_softc *);
static void	ale_init_tx_ring(struct ale_softc *);

static void	ale_stop(struct ifnet *, int);
static void	ale_tick(void *);
static void	ale_get_macaddr(struct ale_softc *);
static void	ale_mac_config(struct ale_softc *);
static void	ale_phy_reset(struct ale_softc *);
static void	ale_reset(struct ale_softc *);
static void	ale_rxfilter(struct ale_softc *);
static void	ale_rxvlan(struct ale_softc *);
static void	ale_stats_clear(struct ale_softc *);
static void	ale_stats_update(struct ale_softc *);
static void	ale_stop_mac(struct ale_softc *);

CFATTACH_DECL_NEW(ale, sizeof(struct ale_softc),
	ale_match, ale_attach, ale_detach, NULL);

int aledebug = 0;
#define DPRINTF(x)	do { if (aledebug) printf x; } while (0)

#define ALE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
ale_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct ale_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return -1;

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		switch (reg) {
		case MII_100T2CR:
		case MII_100T2SR:
		case MII_EXTSR:
			*val = 0;
			return 0;
		default:
			break;
		}
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

static int
ale_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct ale_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->ale_phyaddr)
		return -1;

	if (sc->ale_flags & ALE_FLAG_FASTETHER) {
		switch (reg) {
		case MII_100T2CR:
		case MII_100T2SR:
		case MII_EXTSR:
			return 0;
		default:
			break;
		}
	}

	CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

static void
ale_miibus_statchg(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->ale_flags &= ~ALE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->ale_flags |= ALE_FLAG_LINK;
			break;

		case IFM_1000_T:
			if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0)
				sc->ale_flags |= ALE_FLAG_LINK;
			break;

		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ale_flags & ALE_FLAG_LINK) != 0) {
		ale_mac_config(sc);
		/* Reenable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALE_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}
}

void
ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

int
ale_mediachange(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

int
ale_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_L1E);
}

void
ale_get_macaddr(struct ale_softc *sc)
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, ALE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, ALE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_VPD,
	    &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set ethernet address of controller.
		 */
		CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (aledebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, ALE_PAR0);
	ea[1] = CSR_READ_4(sc, ALE_PAR1);
	sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

void
ale_phy_reset(struct ale_softc *sc)
{
	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET |
	    GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);
	CSR_WRITE_2(sc, ALE_GPHY_CTRL,
	    GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE |
	    GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON);
	DELAY(1000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E

	/* Enable hibernation mode. */
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x0B);
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0xBC00);
	/* Set Class A/B for all modes. */
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x00);
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x02EF);
	/* Enable 10BT power saving. */
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x12);
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x4C04);
	/* Adjust 1000T power. */
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x04);
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x8BBB);
	/* 10BT center tap voltage. */
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_ADDR, 0x05);
	ale_miibus_writereg(sc->sc_dev, sc->ale_phyaddr,
	    ATPHY_DBG_DATA, 0x2C46);

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
	DELAY(1000);
}

void
ale_attach(device_t parent, device_t self, void *aux)
{
	struct ale_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_miibus;
	pcireg_t memtype;
	int mii_flags, error = 0;
	uint32_t rxf_len, txf_len;
	const char *chipname;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1E Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, ALE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, ALE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish_xname(pc, ih, IPL_NET, ale_intr,
	    sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->ale_phyaddr = ALE_PHY_ADDR;

	/* Reset PHY. */
	ale_phy_reset(sc);

	/* Reset the ethernet controller. */
	ale_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->ale_rev = PCI_REVISION(pa->pa_class);
	if (sc->ale_rev >= 0xF0) {
		/* L2E Rev. B. AR8114 */
		sc->ale_flags |= ALE_FLAG_FASTETHER;
		chipname = "AR8114 (L2E RevB)";
	} else {
		if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) {
			/* L1E AR8121 */
			sc->ale_flags |= ALE_FLAG_JUMBO;
			chipname = "AR8121 (L1E)";
		} else {
			/* L2E Rev. A. AR8113 */
			sc->ale_flags |= ALE_FLAG_FASTETHER;
			chipname = "AR8113 (L2E RevA)";
		}
	}
	aprint_normal_dev(self, "%s, %s\n", chipname, intrstr);

	/*
	 * All known controllers seem to require 4-byte alignment
	 * of Tx buffers to make Tx checksum offload with custom
	 * checksum generation method work.
	 */
	sc->ale_flags |= ALE_FLAG_TXCSUM_BUG;

	/*
	 * All known controllers seem to have issues with Rx checksum
	 * offload for fragmented IP datagrams.
	 */
	sc->ale_flags |= ALE_FLAG_RXCSUM_BUG;

	/*
	 * Don't use Tx CMB. It is known to cause RRS update failure
	 * under certain circumstances. A typical symptom of the
	 * issue is an unexpected sequence number encountered in the
	 * Rx handler.
	 */
	sc->ale_flags |= ALE_FLAG_TXCMB_BUG;
	sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->ale_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->ale_chip_rev);

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.
	 */
	txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN);
	rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
	if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF ||
	    rxf_len == 0xFFFFFFFF) {
		aprint_error_dev(self, "chip revision : 0x%04x, %u Tx FIFO "
		    "%u Rx FIFO -- not initialized?\n",
		    sc->ale_chip_rev, txf_len, rxf_len);
		goto fail;
	}

	if (aledebug) {
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
		    txf_len, rxf_len);
	}

	/* Set max allowable DMA size. */
	sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, ale_tick, sc);

	error = ale_dma_alloc(sc);
	if (error)
		goto fail;

	/* Load station address. */
	ale_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->ale_eaddr));

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = ale_init;
	ifp->if_ioctl = ale_ioctl;
	ifp->if_start = ale_start;
	ifp->if_stop = ale_stop;
	ifp->if_watchdog = ale_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, ALE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef ALE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = ale_miibus_readreg;
	mii->mii_writereg = ale_miibus_writereg;
	mii->mii_statchg = ale_miibus_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ale_mediachange, ale_mediastatus);
	mii_flags = 0;
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->ale_eaddr);

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
fail:
	ale_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(pc, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
ale_detach(device_t self, int flags)
{
	struct ale_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	ale_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	ale_dma_free(sc);

	/* Delete all remaining media. */
	ifmedia_fini(&sc->sc_miibus.mii_media);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return 0;
}


static int
ale_dma_alloc(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int nsegs, error, guard_size, i;

	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0)
		guard_size = ALE_JUMBO_FRAMELEN;
	else
		guard_size = ALE_MAX_FRAMELEN;
	sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ,
	    ALE_RX_PAGE_ALIGN);

	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_RING_SZ, 1,
	    ALE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_ring_map);
	if (error) {
		sc->ale_cdata.ale_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_RING_SZ,
	    PAGE_SIZE, 0, &sc->ale_cdata.ale_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_ring_seg,
	    nsegs, ALE_TX_RING_SZ, (void **)&sc->ale_cdata.ale_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->ale_cdata.ale_tx_ring, 0, ALE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map,
	    sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_ring_seg, 1);
		return error;
	}
	sc->ale_cdata.ale_tx_ring_paddr =
	    sc->ale_cdata.ale_tx_ring_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA stuffs for RX pages
		 */
		error = bus_dmamap_create(sc->sc_dmat, sc->ale_pagesize, 1,
		    sc->ale_pagesize, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].page_map);
		if (error) {
			sc->ale_cdata.ale_rx_page[i].page_map = NULL;
			return ENOBUFS;
		}

		/* Allocate DMA'able memory for RX pages */
		error = bus_dmamem_alloc(sc->sc_dmat, sc->ale_pagesize,
		    PAGE_SIZE, 0, &sc->ale_cdata.ale_rx_page[i].page_seg,
		    1, &nsegs, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx ring.\n", device_xname(sc->sc_dev));
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].page_seg, nsegs,
		    sc->ale_pagesize,
		    (void **)&sc->ale_cdata.ale_rx_page[i].page_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return ENOBUFS;

		memset(sc->ale_cdata.ale_rx_page[i].page_addr, 0,
		    sc->ale_pagesize);

		/* Load the DMA map for Rx pages. */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].page_map,
		    sc->ale_cdata.ale_rx_page[i].page_addr,
		    sc->ale_pagesize, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for "
			    "Rx pages.\n", device_xname(sc->sc_dev));
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].page_seg, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].page_paddr =
		    sc->ale_cdata.ale_rx_page[i].page_map->dm_segs[0].ds_addr;
	}

	/*
	 * Create DMA stuffs for Tx CMB.
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALE_TX_CMB_SZ, 1,
	    ALE_TX_CMB_SZ, 0, BUS_DMA_NOWAIT, &sc->ale_cdata.ale_tx_cmb_map);
	if (error) {
		sc->ale_cdata.ale_tx_cmb_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for Tx CMB. */
	error = bus_dmamem_alloc(sc->sc_dmat, ALE_TX_CMB_SZ, PAGE_SIZE, 0,
	    &sc->ale_cdata.ale_tx_cmb_seg, 1, &nsegs, BUS_DMA_WAITOK);

	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx CMB.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->ale_cdata.ale_tx_cmb_seg,
	    nsegs, ALE_TX_CMB_SZ, (void **)&sc->ale_cdata.ale_tx_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->ale_cdata.ale_tx_cmb, 0, ALE_TX_CMB_SZ);

	/* Load the DMA map for Tx CMB. */
	error = bus_dmamap_load(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map,
	    sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx CMB.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_cmb_seg, 1);
		return error;
	}

	sc->ale_cdata.ale_tx_cmb_paddr =
	    sc->ale_cdata.ale_tx_cmb_map->dm_segs[0].ds_addr;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		/*
		 * Create DMA stuffs for Rx CMB.
		 */
		error = bus_dmamap_create(sc->sc_dmat, ALE_RX_CMB_SZ, 1,
		    ALE_RX_CMB_SZ, 0, BUS_DMA_NOWAIT,
		    &sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (error) {
			sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
			return ENOBUFS;
		}

		/* Allocate DMA'able memory for Rx CMB */
		error = bus_dmamem_alloc(sc->sc_dmat, ALE_RX_CMB_SZ,
		    PAGE_SIZE, 0, &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1,
		    &nsegs, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not allocate DMA'able memory for "
			    "Rx CMB\n", device_xname(sc->sc_dev));
			return error;
		}
		error = bus_dmamem_map(sc->sc_dmat,
		    &sc->ale_cdata.ale_rx_page[i].cmb_seg, nsegs,
		    ALE_RX_CMB_SZ,
		    (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    BUS_DMA_NOWAIT);
		if (error)
			return ENOBUFS;

		memset(sc->ale_cdata.ale_rx_page[i].cmb_addr, 0, ALE_RX_CMB_SZ);

		/* Load the DMA map for Rx CMB */
		error = bus_dmamap_load(sc->sc_dmat,
		    sc->ale_cdata.ale_rx_page[i].cmb_map,
		    sc->ale_cdata.ale_rx_page[i].cmb_addr,
		    ALE_RX_CMB_SZ, NULL, BUS_DMA_WAITOK);
		if (error) {
			printf("%s: could not load DMA'able memory for Rx CMB"
			    "\n", device_xname(sc->sc_dev));
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1);
			return error;
		}
		sc->ale_cdata.ale_rx_page[i].cmb_paddr =
		    sc->ale_cdata.ale_rx_page[i].cmb_map->dm_segs[0].ds_addr;
	}


	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALE_TSO_MAXSIZE,
		    ALE_MAXTXSEGS, ALE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap.\n",
			    device_xname(sc->sc_dev));
			return error;
		}
	}

	return 0;
}

static void
ale_dma_free(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	/* Tx buffers. */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}

	/* Tx descriptor ring. */
	if (sc->ale_cdata.ale_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map);
	if (sc->ale_cdata.ale_tx_ring_map != NULL &&
	    sc->ale_cdata.ale_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_ring_seg, 1);
	sc->ale_cdata.ale_tx_ring = NULL;
	sc->ale_cdata.ale_tx_ring_map = NULL;

	/* Rx page block. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].page_map);
		if (sc->ale_cdata.ale_rx_page[i].page_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].page_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].page_seg, 1);
		sc->ale_cdata.ale_rx_page[i].page_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].page_map = NULL;
	}

	/* Rx CMB. */
	for (i = 0; i < ALE_RX_PAGES; i++) {
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL)
			bus_dmamap_unload(sc->sc_dmat,
			    sc->ale_cdata.ale_rx_page[i].cmb_map);
		if (sc->ale_cdata.ale_rx_page[i].cmb_map != NULL &&
		    sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL)
			bus_dmamem_free(sc->sc_dmat,
			    &sc->ale_cdata.ale_rx_page[i].cmb_seg, 1);
		sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL;
		sc->ale_cdata.ale_rx_page[i].cmb_map = NULL;
	}

	/* Tx CMB. */
	if (sc->ale_cdata.ale_tx_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map);
	if (sc->ale_cdata.ale_tx_cmb_map != NULL &&
	    sc->ale_cdata.ale_tx_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->ale_cdata.ale_tx_cmb_seg, 1);
	sc->ale_cdata.ale_tx_cmb = NULL;
	sc->ale_cdata.ale_tx_cmb_map = NULL;

}

static int
ale_encap(struct ale_softc *sc, struct mbuf * const m)
{
	struct ale_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	cflags = vtag = 0;
	poff = 0;

	prod = sc->ale_cdata.ale_tx_prod;
	txd = &sc->ale_cdata.ale_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *mnew = m_defrag(m, M_NOWAIT);
		if (mnew != NULL) {
			KASSERT(m == mnew);
			error = bus_dmamap_load_mbuf(sc->sc_dmat, map, mnew,
			    BUS_DMA_NOWAIT);
		} else {
			/* Just drop if we can't defrag. */
			error = EFBIG;
		}
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
			}
			return error;
		}
	} else if (error) {
		return error;
	}

	nsegs = map->dm_nsegs;
	KASSERT(nsegs != 0);

	/* Check descriptor overrun. */
	if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) {
		/*
		 * AR81xx supports a Tx custom checksum offload feature
		 * that offloads a single 16bit checksum computation,
		 * so you can choose one among IP, TCP and UDP.
		 * Normally the driver sets the checksum start/insertion
		 * position from the information of the TCP/UDP frame, as
		 * the TCP/UDP checksum takes more time than that of IP.
		 * However it seems that custom checksum offload
		 * requires 4-byte aligned Tx buffers due to a hardware
		 * bug.
		 * AR81xx also supports explicit Tx checksum computation
		 * if it is told the size of the IP header and TCP
		 * header (for UDP, the header size does not matter
		 * because it's fixed length). However with this scheme
		 * TSO does not work, so you have to choose either
		 * TSO or explicit Tx checksum offload. I have chosen TSO
		 * plus custom checksum offload with a work-around which
		 * will cover the most common usage for this consumer
		 * ethernet controller. The work-around takes a lot of
		 * CPU cycles if the Tx buffer is not aligned on a 4-byte
		 * boundary, though.
		 */
		cflags |= ALE_TD_CXSUM;
		/* Set checksum start offset. */
		cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (vlan_has_tag(m)) {
		vtag = ALE_TX_VLAN_TAG(htons(vlan_get_tag(m)));
		vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK);
		cflags |= ALE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->ale_cdata.ale_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(ALE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->ale_cdata.ale_tx_cnt++;
		ALE_DESC_INC(prod, ALE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->ale_cdata.ale_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT;
	desc = &sc->ale_cdata.ale_tx_ring[prod];
	desc->flags |= htole32(ALE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->ale_cdata.ale_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
ale_start(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq, error;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim transmitted frames. */
	if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT)
		ale_txeof(sc);

	enq = 0;
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((error = ale_encap(sc, m_head)) != 0) {
			if (error == EFBIG) {
				/* This is fatal for the packet. */
				IFQ_DEQUEUE(&ifp->if_snd, m_head);
				m_freem(m_head);
				if_statinc(ifp, if_oerrors);
				continue;
			}
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	if (enq) {
		/* Kick. */
		CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX,
		    sc->ale_cdata.ale_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALE_TX_TIMEOUT;
	}
}

static void
ale_watchdog(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;

	if ((sc->ale_flags & ALE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);
		ale_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);
	ale_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		ale_start(ifp);
}

static int
ale_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ale_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			ale_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
ale_mac_config(struct ale_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

static void
ale_stats_clear(struct ale_softc *sc)
{
	struct smb sb;
	uint32_t *reg;
	int i;

	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
}

static void
ale_stats_update(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ale_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->ale_stats;
	smb = &sb;

	/* Read Rx statistics. */
	for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) {
		*reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}
	/* Read Tx statistics. */
	for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) {
		*reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i);
		i += sizeof(uint32_t);
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if_statadd_ref(ifp, nsr, if_opackets, smb->tx_frames);

	if_statadd_ref(ifp, nsr, if_collisions,
	    smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT);

	/*
	 * XXX
	 * tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate
	 * the counter name is not correct one so I've removed the
	 * counter in output errors.
	 */
	if_statadd_ref(ifp, nsr, if_oerrors,
	    smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun);

	if_statadd_ref(ifp, nsr, if_ierrors,
	    smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs);

	IF_STAT_PUTREF(ifp);
}

static int
ale_intr(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;

	status = CSR_READ_4(sc, ALE_INTR_STATUS);
	if ((status & ALE_INTRS) == 0)
		return 0;

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		int error;

		error = ale_rxeof(sc);
		if (error) {
			sc->ale_stats.reset_brk_seq++;
			ale_init(ifp);
			return 0;
		}

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			ale_init(ifp);
			return 0;
		}

		ale_txeof(sc);
		if_schedule_deferred_start(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF);
	return 1;
}

static void
ale_txeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ale_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->ale_cdata.ale_tx_cnt == 0)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
		    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK;
	} else
		prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX);
	cons = sc->ale_cdata.ale_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALE_DESC_INC(cons, ALE_TX_RING_CNT)) {
		if (sc->ale_cdata.ale_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->ale_cdata.ale_tx_cnt--;
		txd = &sc->ale_cdata.ale_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if (prog > 0) {
		sc->ale_cdata.ale_tx_cons = cons;
		/*
		 * Unarm watchdog timer only when there is no pending
		 * Tx descriptors in queue.
		 */
		if (sc->ale_cdata.ale_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
static void
ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page,
    uint32_t length, uint32_t *prod)
{
	struct ale_rx_page *rx_page;

	rx_page = *page;
	/* Update consumer position. */
	rx_page->cons += roundup(length + sizeof(struct rx_rs),
	    ALE_RX_PAGE_ALIGN);
	if (rx_page->cons >= ALE_RX_PAGE_SZ) {
		/*
		 * End of Rx page reached, let hardware reuse
		 * this page.
		 */
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp,
		    RXF_VALID);
		/* Switch to alternate Rx page. */
		sc->ale_cdata.ale_rx_curp ^= 1;
		rx_page = *page =
		    &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
		/* Page flipped, sync CMB and Rx page. */
		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
		    rx_page->page_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		/* Sync completed, cache updated producer index. */
		*prod = *rx_page->cmb_addr;
	}
}


/*
 * It seems that the AR81xx controller can compute a partial checksum.
 * The partial checksum value could be used to accelerate checksum
 * computation for fragmented TCP/UDP packets, and the upper network
 * stack already takes advantage of partial checksums during IP
 * reassembly.  However, the correctness of the partial hardware
 * checksum assistance is unverified due to the lack of a data sheet.
 * In addition, the controller's Rx design, which requires copying
 * every frame, effectively nullifies one of its nicest offload
 * capabilities.
 */
static void
ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status)
{
	if (status & ALE_RD_IPCSUM_NOK)
		m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;

	if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) {
		if (((status & ALE_RD_IPV4_FRAG) == 0) &&
		    ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) &&
		    (status & ALE_RD_TCP_UDPCSUM_NOK))
		{
			m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}
	} else {
		if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) {
			if (status & ALE_RD_TCP_UDPCSUM_NOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
			}
		}
	}
	/*
	 * Don't mark the checksum bad for fragmented TCP/UDP frames,
	 * as fragmented frames may always have the bad-checksum bit
	 * set in their frame status.
	 */
}

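/*
 * Main Rx loop: walk the return status entries in the current Rx page
 * up to the producer index cached from the CMB, copying each frame out
 * of the page with m_devget(9) because the page itself is recycled
 * wholesale rather than handed to the network stack.  On a sequence
 * number mismatch an error is returned and the caller, ale_intr(),
 * reinitializes the chip.
 */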
/* Process received frames. */
static int
ale_rxeof(struct ale_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct ale_rx_page *rx_page;
	struct rx_rs *rs;
	struct mbuf *m;
	uint32_t length, prod, seqno, status;
	int prog;

	rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp];
	bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
	    rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
	    rx_page->page_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	/*
	 * Don't access the producer index directly, as the hardware may
	 * update it while the Rx handler is in progress.  It would be
	 * even better if there were a way to let the hardware know how
	 * far the driver has processed received frames.  Alternatively,
	 * the hardware could provide a way to disable CMB updates until
	 * the driver acknowledges the end of CMB access.
	 */
	prod = *rx_page->cmb_addr;
	for (prog = 0; ; prog++) {
		if (rx_page->cons >= prod)
			break;
		rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons);
		seqno = ALE_RX_SEQNO(le32toh(rs->seqno));
		if (sc->ale_cdata.ale_rx_seqno != seqno) {
			/*
			 * Normally this should not happen unless there is
			 * a severe driver bug or corrupted memory.
			 * However, it seems to happen under certain
			 * conditions triggered by abrupt Rx events such as
			 * the start of a bulk transfer from a remote host.
			 * It is not easy to reproduce, and it is doubtful
			 * that it is related to a hardware FIFO overflow
			 * or to Tx CMB update activity.  Similar behaviour
			 * has been seen on the RealTek 8139, which uses a
			 * resembling Rx scheme.
			 */
			if (aledebug)
				printf("%s: garbled seq: %u, expected: %u -- "
				    "resetting!\n", device_xname(sc->sc_dev),
				    seqno, sc->ale_cdata.ale_rx_seqno);
			return EIO;
		}
		/* Frame received. */
		sc->ale_cdata.ale_rx_seqno++;
		length = ALE_RX_BYTES(le32toh(rs->length));
		status = le32toh(rs->flags);
		if (status & ALE_RD_ERROR) {
			/*
			 * We want to pass the following frames to the
			 * upper layer regardless of the error bits in the
			 * Rx return status:
			 *
			 * o IP/TCP/UDP checksum is bad.
			 * o frame length and protocol specific length
			 *   do not match.
			 */
			if (status & (ALE_RD_CRC | ALE_RD_CODE |
			    ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW |
			    ALE_RD_TRUNC)) {
				ale_rx_update_page(sc, &rx_page, length, &prod);
				continue;
			}
		}
		/*
		 * m_devget(9) is the major bottleneck of ale(4); it stems
		 * from a hardware limitation.  For jumbo frames we could
		 * get slightly better performance if the driver used
		 * m_getjcl(9) with a proper buffer size argument, but that
		 * would make the code more complicated, and users are
		 * unlikely to expect good Rx performance numbers from
		 * these low-end consumer Ethernet controllers.
		 */
		m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN,
		    0, ifp);
		if (m == NULL) {
			if_statinc(ifp, if_iqdrops);
			ale_rx_update_page(sc, &rx_page, length, &prod);
			continue;
		}
		if (status & ALE_RD_IPV4)
			ale_rxcsum(sc, m, status);
#if NVLAN > 0
		if (status & ALE_RD_VLAN) {
			uint32_t vtags = ALE_RX_VLAN(le32toh(rs->vtags));
			vlan_set_tag(m, ALE_RX_VLAN_TAG(vtags));
		}
#endif

		/* Pass it to the upper layer. */
		if_percpuq_enqueue(ifp->if_percpuq, m);

		ale_rx_update_page(sc, &rx_page, length, &prod);
	}

	return 0;
}

static void
ale_tick(void *xsc)
{
	struct ale_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	ale_stats_update(sc);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
ale_reset(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);

	CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET);
	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0)
			break;
	}
	if (i == 0)
		printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));

	for (i = ALE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
		    reg);
}

static int
ale_init(struct ifnet *ifp)
{
	struct ale_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;

	/*
	 * Cancel any pending I/O.
	 */
	ale_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	ale_reset(sc);

	/* Initialize Rx pages and Tx descriptors (DMA memory blocks). */
	ale_init_rx_pages(sc);
	ale_init_tx_ring(sc);

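	/*
	 * The hardware keeps the station address in two registers: PAR0
	 * holds the last four bytes of the MAC address and PAR1 the first
	 * two, so the writes below pack eaddr[] accordingly (register
	 * layout inferred from this code; no data sheet is available).
	 */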
	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	CSR_WRITE_4(sc, ALE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/*
	 * Clear WOL status and disable all WOL features, as WOL
	 * would interfere with Rx operation under normal conditions.
	 */
	CSR_READ_4(sc, ALE_WOL_CFG);
	CSR_WRITE_4(sc, ALE_WOL_CFG, 0);

	/*
	 * Set Tx descriptor/RXF0/CMB base addresses. They share
	 * the same high address part of the DMAable region.
	 */
	paddr = sc->ale_cdata.ale_tx_ring_paddr;
	CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr));
	CSR_WRITE_4(sc, ALE_TPD_CNT,
	    (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK);

	/* Set Rx page base address, note we use single queue. */
	paddr = sc->ale_cdata.ale_rx_page[0].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].page_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Set Tx/Rx CMB addresses. */
	paddr = sc->ale_cdata.ale_tx_cmb_paddr;
	CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr));
	paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr;
	CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr));

	/* Mark RXF0 as valid. */
	CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID);
	CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID);
	/*
	 * No need to initialize RXF1/RXF2/RXF3. We don't use
	 * multi-queue yet.
	 */

	/* Set Rx page size, excluding guard frame size. */
	CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ);

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Set Rx/Tx interrupt trigger threshold. */
	CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) |
	    (4 << INT_TRIG_TX_THRESH_SHIFT));
	/*
	 * XXX
	 * Set the interrupt trigger timer; its purpose and its relation
	 * to the interrupt moderation mechanism are not clear yet.
	 */
	CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER,
	    ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) |
	    (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT)));

	/* Configure interrupt moderation timer. */
	sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT;
	sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT;
	reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALE_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALE_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALE_USECS(sc->ale_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALE_USECS(sc->ale_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALE_MASTER_CFG, reg);
	CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000));

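	/*
	 * The frame size programmed below is the interface MTU (clamped to
	 * at least ETHERMTU) plus the Ethernet header, a possible VLAN tag
	 * and the CRC, i.e. the largest frame the MAC should accept on the
	 * wire rather than the payload size.
	 */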
	/* Set the maximum frame size of the controller. */
	if (ifp->if_mtu < ETHERMTU)
		sc->ale_max_frame_size = ETHERMTU;
	else
		sc->ale_max_frame_size = ifp->if_mtu;
	sc->ale_max_frame_size +=
	    ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALE_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure Tx jumbo frame parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		if (ifp->if_mtu < ETHERMTU)
			reg = sc->ale_max_frame_size;
		else if (ifp->if_mtu < 6 * 1024)
			reg = (sc->ale_max_frame_size * 2) / 3;
		else
			reg = sc->ale_max_frame_size / 2;
		CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH,
		    roundup(reg, TX_JUMBO_THRESH_UNIT) >>
		    TX_JUMBO_THRESH_UNIT_SHIFT);
	}

	/* Configure TxQ. */
	reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT))
	    << TXQ_CFG_TX_FIFO_BURST_SHIFT;
	reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB);

	/* Configure Rx jumbo frame & flow control parameters. */
	if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) {
		reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT);
		CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH,
		    (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) <<
		    RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) |
		    ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) &
		    RX_JUMBO_LKAH_MASK));
		reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN);
		rxf_hi = (reg * 7) / 10;
		rxf_lo = (reg * 3) / 10;
		CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH,
		    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_LO_MASK) |
		    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
		    RX_FIFO_PAUSE_THRESH_HI_MASK));
	}

	/* Disable RSS. */
	CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALE_RSS_CPU, 0);

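	/*
	 * The RxQ and DMA engine are configured next.  ale_dma_rd_burst and
	 * ale_dma_wr_burst are presumably derived from the PCIe maximum
	 * read request and payload sizes during attach; here they are only
	 * merged into the DMA configuration register together with the CMB
	 * enables and the default read/write delay counts.
	 */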
	/* Configure RxQ. */
	CSR_WRITE_4(sc, ALE_RXQ_CFG,
	    RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure DMA parameters. */
	reg = 0;
	if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0)
		reg |= DMA_CFG_TXCMB_ENB;
	CSR_WRITE_4(sc, ALE_DMA_CFG,
	    DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 |
	    sc->ale_dma_rd_burst | reg |
	    sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB |
	    ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK) |
	    ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK));

	/*
	 * The hardware can be configured to issue an SMB interrupt at a
	 * programmed interval.  Since the driver already runs a callout
	 * every hz, we use that instead of relying on the periodic SMB
	 * interrupt.
	 */
	CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0));

	/* Clear MAC statistics. */
	ale_stats_clear(sc);

	/*
	 * Configure Tx/Rx MACs.
	 * - Auto-padding for short frames.
	 * - Enable CRC generation.
	 * Actual reconfiguration of the MAC for the resolved speed/duplex
	 * is done after link establishment is detected.
	 * The AR81xx always does checksum computation regardless of the
	 * MAC_CFG_RXCSUM_ENB bit.  In fact, setting the bit causes Rx
	 * handling issues for fragmented IP datagrams due to a silicon bug.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);

	/* Set up the receive filter. */
	ale_rxfilter(sc);
	ale_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0);

	sc->ale_flags &= ~ALE_FLAG_LINK;

	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

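/*
 * ale_stop() tears things down in roughly the reverse order of
 * ale_init(): stop the tick callout, mark the interface down, quiesce
 * the PHY, mask interrupts, disable the Tx/Rx queues and CMB DMA, stop
 * the MACs, and finally release any mbufs still held by the Tx ring.
 */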
static void
ale_stop(struct ifnet *ifp, int disable)
{
	struct ale_softc *sc = ifp->if_softc;
	struct ale_txdesc *txd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->ale_flags &= ~ALE_FLAG_LINK;

	ale_stats_update(sc);

	mii_down(&sc->sc_miibus);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALE_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/* Disable queue processing and DMA. */
	reg = CSR_READ_4(sc, ALE_TXQ_CFG);
	reg &= ~TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_TXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_RXQ_CFG);
	reg &= ~RXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALE_RXQ_CFG, reg);
	reg = CSR_READ_4(sc, ALE_DMA_CFG);
	reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB);
	CSR_WRITE_4(sc, ALE_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	ale_stop_mac(sc);

	/* Disable interrupts again? XXX */
	CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF);

	/*
	 * Free TX mbufs still in the queues.
	 */
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

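/*
 * Clear the Tx/Rx enable bits in MAC_CFG and wait for the controller
 * to report idle.  The loop below allows roughly ALE_TIMEOUT * 10
 * microseconds before giving up and logging the stuck idle status.
 */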
static void
ale_stop_mac(struct ale_softc *sc)
{
	uint32_t reg;
	int i;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
	}

	for (i = ALE_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALE_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	if (i == 0)
		printf("%s: could not disable Tx/Rx MAC(0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);
}

static void
ale_init_tx_ring(struct ale_softc *sc)
{
	struct ale_txdesc *txd;
	int i;

	sc->ale_cdata.ale_tx_prod = 0;
	sc->ale_cdata.ale_tx_cons = 0;
	sc->ale_cdata.ale_tx_cnt = 0;

	memset(sc->ale_cdata.ale_tx_ring, 0, ALE_TX_RING_SZ);
	memset(sc->ale_cdata.ale_tx_cmb, 0, ALE_TX_CMB_SZ);
	for (i = 0; i < ALE_TX_RING_CNT; i++) {
		txd = &sc->ale_cdata.ale_txdesc[i];
		txd->tx_m = NULL;
	}
	*sc->ale_cdata.ale_tx_cmb = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_cmb_map, 0,
	    sc->ale_cdata.ale_tx_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->ale_cdata.ale_tx_ring_map, 0,
	    sc->ale_cdata.ale_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
}

static void
ale_init_rx_pages(struct ale_softc *sc)
{
	struct ale_rx_page *rx_page;
	int i;

	sc->ale_cdata.ale_rx_seqno = 0;
	sc->ale_cdata.ale_rx_curp = 0;

	for (i = 0; i < ALE_RX_PAGES; i++) {
		rx_page = &sc->ale_cdata.ale_rx_page[i];
		memset(rx_page->page_addr, 0, sc->ale_pagesize);
		memset(rx_page->cmb_addr, 0, ALE_RX_CMB_SZ);
		rx_page->cons = 0;
		*rx_page->cmb_addr = 0;
		bus_dmamap_sync(sc->sc_dmat, rx_page->page_map, 0,
		    rx_page->page_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, rx_page->cmb_map, 0,
		    rx_page->cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}

static void
ale_rxvlan(struct ale_softc *sc)
{
	uint32_t reg;

	reg = CSR_READ_4(sc, ALE_MAC_CFG);
	reg &= ~MAC_CFG_VLAN_TAG_STRIP;
	if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
		reg |= MAC_CFG_VLAN_TAG_STRIP;
	CSR_WRITE_4(sc, ALE_MAC_CFG, reg);
}

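/*
 * The multicast filter is a 64-bit hash table split across the MAR0
 * and MAR1 registers.  ale_rxfilter() below hashes each multicast
 * address with a big-endian CRC-32 and uses the top bit of the CRC to
 * select the register and the next five bits to select the bit within
 * it; address ranges and promiscuous mode fall back to all-ones tables.
 */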
static void
ale_rxfilter(struct ale_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	rxcfg = CSR_READ_4(sc, ALE_MAC_CFG);
	rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxcfg |= MAC_CFG_BCAST;

	/* Program new filter. */
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		goto update;

	memset(mchash, 0, sizeof(mchash));

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto update;
		}
		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

update:
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if (ifp->if_flags & IFF_PROMISC) {
			rxcfg |= MAC_CFG_PROMISC;
			/* XXX Use ETHER_F_ALLMULTI in future. */
			ifp->if_flags |= IFF_ALLMULTI;
		} else
			rxcfg |= MAC_CFG_ALLMULTI;
		mchash[0] = mchash[1] = 0xFFFFFFFF;
	}
	CSR_WRITE_4(sc, ALE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, ALE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg);
}