1 1.37 msaitoh /* $NetBSD: if_enet.c,v 1.37 2024/02/07 04:20:26 msaitoh Exp $ */ 2 1.1 ryo 3 1.1 ryo /* 4 1.37 msaitoh * Copyright (c) 2014 Ryo Shimizu 5 1.1 ryo * All rights reserved. 6 1.1 ryo * 7 1.1 ryo * Redistribution and use in source and binary forms, with or without 8 1.1 ryo * modification, are permitted provided that the following conditions 9 1.1 ryo * are met: 10 1.1 ryo * 1. Redistributions of source code must retain the above copyright 11 1.1 ryo * notice, this list of conditions and the following disclaimer. 12 1.1 ryo * 2. Redistributions in binary form must reproduce the above copyright 13 1.1 ryo * notice, this list of conditions and the following disclaimer in the 14 1.1 ryo * documentation and/or other materials provided with the distribution. 15 1.1 ryo * 16 1.1 ryo * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 1.1 ryo * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 1.1 ryo * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 1.1 ryo * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 20 1.1 ryo * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 1.1 ryo * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 1.1 ryo * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 1.1 ryo * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 1.1 ryo * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 1.1 ryo * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 1.1 ryo * POSSIBILITY OF SUCH DAMAGE. 
27 1.1 ryo */ 28 1.1 ryo 29 1.1 ryo /* 30 1.6 ryo * i.MX6,7 10/100/1000-Mbps ethernet MAC (ENET) 31 1.1 ryo */ 32 1.1 ryo 33 1.1 ryo #include <sys/cdefs.h> 34 1.37 msaitoh __KERNEL_RCSID(0, "$NetBSD: if_enet.c,v 1.37 2024/02/07 04:20:26 msaitoh Exp $"); 35 1.1 ryo 36 1.1 ryo #include "vlan.h" 37 1.1 ryo 38 1.1 ryo #include <sys/param.h> 39 1.1 ryo #include <sys/bus.h> 40 1.1 ryo #include <sys/mbuf.h> 41 1.1 ryo #include <sys/device.h> 42 1.1 ryo #include <sys/sockio.h> 43 1.1 ryo #include <sys/kernel.h> 44 1.3 riastrad #include <sys/rndsource.h> 45 1.1 ryo 46 1.1 ryo #include <lib/libkern/libkern.h> 47 1.1 ryo 48 1.1 ryo #include <net/if.h> 49 1.1 ryo #include <net/if_dl.h> 50 1.1 ryo #include <net/if_media.h> 51 1.1 ryo #include <net/if_ether.h> 52 1.1 ryo #include <net/bpf.h> 53 1.1 ryo #include <net/if_vlanvar.h> 54 1.1 ryo 55 1.1 ryo #include <netinet/in.h> 56 1.1 ryo #include <netinet/in_systm.h> 57 1.1 ryo #include <netinet/ip.h> 58 1.1 ryo 59 1.1 ryo #include <dev/mii/mii.h> 60 1.1 ryo #include <dev/mii/miivar.h> 61 1.1 ryo 62 1.1 ryo #include <arm/imx/if_enetreg.h> 63 1.6 ryo #include <arm/imx/if_enetvar.h> 64 1.1 ryo 65 1.1 ryo #undef DEBUG_ENET 66 1.1 ryo #undef ENET_EVENT_COUNTER 67 1.1 ryo 68 1.6 ryo #define ENET_TICK hz 69 1.6 ryo 70 1.1 ryo #ifdef DEBUG_ENET 71 1.1 ryo int enet_debug = 0; 72 1.1 ryo # define DEVICE_DPRINTF(args...) \ 73 1.1 ryo do { if (enet_debug) device_printf(sc->sc_dev, args); } while (0) 74 1.1 ryo #else 75 1.1 ryo # define DEVICE_DPRINTF(args...) 76 1.1 ryo #endif 77 1.1 ryo 78 1.1 ryo 79 1.1 ryo #define RXDESC_MAXBUFSIZE 0x07f0 80 1.6 ryo /* ENET does not work greather than 0x0800... 
*/ 81 1.1 ryo 82 1.1 ryo #undef ENET_SUPPORT_JUMBO /* JUMBO FRAME SUPPORT is unstable */ 83 1.1 ryo #ifdef ENET_SUPPORT_JUMBO 84 1.1 ryo # define ENET_MAX_PKT_LEN 4034 /* MAX FIFO LEN */ 85 1.1 ryo #else 86 1.1 ryo # define ENET_MAX_PKT_LEN 1522 87 1.1 ryo #endif 88 1.1 ryo #define ENET_DEFAULT_PKT_LEN 1522 /* including VLAN tag */ 89 1.1 ryo #define MTU2FRAMESIZE(n) \ 90 1.1 ryo ((n) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN) 91 1.1 ryo 92 1.1 ryo 93 1.1 ryo #define ENET_MAX_PKT_NSEGS 64 94 1.1 ryo 95 1.11 ryo #define ENET_TX_NEXTIDX(idx) \ 96 1.11 ryo (((idx) >= (ENET_TX_RING_CNT - 1)) ? 0 : ((idx) + 1)) 97 1.11 ryo #define ENET_RX_NEXTIDX(idx) \ 98 1.11 ryo (((idx) >= (ENET_RX_RING_CNT - 1)) ? 0 : ((idx) + 1)) 99 1.1 ryo 100 1.1 ryo #define TXDESC_WRITEOUT(idx) \ 101 1.1 ryo bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 102 1.1 ryo sizeof(struct enet_txdesc) * (idx), \ 103 1.1 ryo sizeof(struct enet_txdesc), \ 104 1.1 ryo BUS_DMASYNC_PREWRITE) 105 1.1 ryo 106 1.1 ryo #define TXDESC_READIN(idx) \ 107 1.1 ryo bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, \ 108 1.1 ryo sizeof(struct enet_txdesc) * (idx), \ 109 1.1 ryo sizeof(struct enet_txdesc), \ 110 1.1 ryo BUS_DMASYNC_PREREAD) 111 1.1 ryo 112 1.1 ryo #define RXDESC_WRITEOUT(idx) \ 113 1.1 ryo bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 114 1.1 ryo sizeof(struct enet_rxdesc) * (idx), \ 115 1.1 ryo sizeof(struct enet_rxdesc), \ 116 1.1 ryo BUS_DMASYNC_PREWRITE) 117 1.1 ryo 118 1.1 ryo #define RXDESC_READIN(idx) \ 119 1.1 ryo bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, \ 120 1.1 ryo sizeof(struct enet_rxdesc) * (idx), \ 121 1.1 ryo sizeof(struct enet_rxdesc), \ 122 1.1 ryo BUS_DMASYNC_PREREAD) 123 1.1 ryo 124 1.1 ryo #define ENET_REG_READ(sc, reg) \ 125 1.1 ryo bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg) 126 1.1 ryo 127 1.1 ryo #define ENET_REG_WRITE(sc, reg, value) \ 128 1.1 ryo bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, value) 129 1.1 ryo 130 1.1 ryo #ifdef 
ENET_EVENT_COUNTER 131 1.1 ryo static void enet_attach_evcnt(struct enet_softc *); 132 1.1 ryo static void enet_update_evcnt(struct enet_softc *); 133 1.1 ryo #endif 134 1.1 ryo 135 1.1 ryo static void enet_tick(void *); 136 1.1 ryo static int enet_tx_intr(void *); 137 1.1 ryo static int enet_rx_intr(void *); 138 1.1 ryo static void enet_rx_csum(struct enet_softc *, struct ifnet *, struct mbuf *, 139 1.21 msaitoh int); 140 1.1 ryo 141 1.1 ryo static void enet_start(struct ifnet *); 142 1.1 ryo static int enet_ifflags_cb(struct ethercom *); 143 1.1 ryo static int enet_ioctl(struct ifnet *, u_long, void *); 144 1.1 ryo static int enet_init(struct ifnet *); 145 1.1 ryo static void enet_stop(struct ifnet *, int); 146 1.1 ryo static void enet_watchdog(struct ifnet *); 147 1.1 ryo static void enet_mediastatus(struct ifnet *, struct ifmediareq *); 148 1.1 ryo 149 1.17 msaitoh static int enet_miibus_readreg(device_t, int, int, uint16_t *); 150 1.17 msaitoh static int enet_miibus_writereg(device_t, int, int, uint16_t); 151 1.1 ryo static void enet_miibus_statchg(struct ifnet *); 152 1.1 ryo 153 1.1 ryo static void enet_gethwaddr(struct enet_softc *, uint8_t *); 154 1.1 ryo static void enet_sethwaddr(struct enet_softc *, uint8_t *); 155 1.1 ryo static void enet_setmulti(struct enet_softc *); 156 1.1 ryo static int enet_encap_mbufalign(struct mbuf **); 157 1.1 ryo static int enet_encap_txring(struct enet_softc *, struct mbuf **); 158 1.1 ryo static int enet_init_regs(struct enet_softc *, int); 159 1.1 ryo static int enet_alloc_ring(struct enet_softc *); 160 1.1 ryo static void enet_init_txring(struct enet_softc *); 161 1.1 ryo static int enet_init_rxring(struct enet_softc *); 162 1.1 ryo static void enet_reset_rxdesc(struct enet_softc *, int); 163 1.1 ryo static int enet_alloc_rxbuf(struct enet_softc *, int); 164 1.1 ryo static void enet_drain_txbuf(struct enet_softc *); 165 1.1 ryo static void enet_drain_rxbuf(struct enet_softc *); 166 1.1 ryo static int 
enet_alloc_dma(struct enet_softc *, size_t, void **,
		    bus_dmamap_t *);

/*
 * Common attach routine shared by the imx6/imx7 front-ends.
 * Allocates the DMA descriptor rings, determines the MAC address
 * (softc value, then hardware registers, then random fallback),
 * programs the controller, and attaches the ifnet/MII/random-source
 * plumbing.  Returns 0 on success, -1 if ring allocation fails.
 */
int
enet_attach_common(device_t self)
{
	struct enet_softc *sc = device_private(self);
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_mii;

	/* allocate dma buffer */
	if (enet_alloc_ring(sc))
		return -1;

#define IS_ENADDR_ZERO(enaddr) \
	((enaddr[0] | enaddr[1] | enaddr[2] | \
	  enaddr[3] | enaddr[4] | enaddr[5]) == 0)

	if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
		/* by any chance, mac-address is already set by bootloader? */
		enet_gethwaddr(sc, sc->sc_enaddr);
		if (IS_ENADDR_ZERO(sc->sc_enaddr)) {
			/* give up. set randomly */
			uint32_t eaddr = random();
			/*
			 * not multicast: mask byte 0 with 0xfc so both the
			 * multicast bit (0x01) and the locally-administered
			 * bit (0x02) end up clear.
			 */
			sc->sc_enaddr[0] = (eaddr >> 24) & 0xfc;
			sc->sc_enaddr[1] = eaddr >> 16;
			sc->sc_enaddr[2] = eaddr >> 8;
			sc->sc_enaddr[3] = eaddr;
			eaddr = random();
			sc->sc_enaddr[4] = eaddr >> 8;
			sc->sc_enaddr[5] = eaddr;

			aprint_error_dev(self,
			    "cannot get mac address. set randomly\n");
		}
	}
	enet_sethwaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* full register init (second argument 1 == attach-time init) */
	enet_init_regs(sc, 1);

	/* callout will be scheduled from enet_init() */
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, enet_tick, sc);

	/* setup ifp */
	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = IF_Gbps(1);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = enet_ioctl;
	ifp->if_start = enet_start;
	ifp->if_init = enet_init;
	ifp->if_stop = enet_stop;
	ifp->if_watchdog = enet_watchdog;

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
#ifdef ENET_SUPPORT_JUMBO
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
#endif

	/* hardware can offload IPv4/TCP/UDP checksums in both directions */
	ifp->if_capabilities = IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx |
	    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;

	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(ENET_TX_RING_CNT, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* setup MII */
	sc->sc_ethercom.ec_mii = mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = enet_miibus_readreg;
	mii->mii_writereg = enet_miibus_writereg;
	mii->mii_statchg = enet_miibus_statchg;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, enet_mediastatus);

	/* try to attach PHY */
	mii_attach(self, mii, 0xffffffff, sc->sc_phyid, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* no PHY found: fall back to a fixed manual media entry */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, enet_ifflags_cb);

	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

#ifdef ENET_EVENT_COUNTER
	enet_attach_evcnt(sc);
#endif

	sc->sc_stopping = false;

	return 0;
}

#ifdef ENET_EVENT_COUNTER
/*
 * Attach one MISC event counter per RMON statistic register so the
 * per-tick snapshots taken by enet_update_evcnt() are visible via
 * vmstat -e.
 */
static void
enet_attach_evcnt(struct enet_softc *sc)
{
	const char *xname;

	xname = device_xname(sc->sc_dev);

#define ENET_EVCNT_ATTACH(name)	\
	evcnt_attach_dynamic(&sc->sc_ev_ ## name, EVCNT_TYPE_MISC,	\
	    NULL, xname, #name);

	ENET_EVCNT_ATTACH(t_drop);
	ENET_EVCNT_ATTACH(t_packets);
	ENET_EVCNT_ATTACH(t_bc_pkt);
	ENET_EVCNT_ATTACH(t_mc_pkt);
	ENET_EVCNT_ATTACH(t_crc_align);
	ENET_EVCNT_ATTACH(t_undersize);
	ENET_EVCNT_ATTACH(t_oversize);
	ENET_EVCNT_ATTACH(t_frag);
	ENET_EVCNT_ATTACH(t_jab);
	ENET_EVCNT_ATTACH(t_col);
	ENET_EVCNT_ATTACH(t_p64);
	ENET_EVCNT_ATTACH(t_p65to127n);
	ENET_EVCNT_ATTACH(t_p128to255n);
	ENET_EVCNT_ATTACH(t_p256to511);
	ENET_EVCNT_ATTACH(t_p512to1023);
	ENET_EVCNT_ATTACH(t_p1024to2047);
	ENET_EVCNT_ATTACH(t_p_gte2048);
	ENET_EVCNT_ATTACH(t_octets);
	ENET_EVCNT_ATTACH(r_packets);
	ENET_EVCNT_ATTACH(r_bc_pkt);
	ENET_EVCNT_ATTACH(r_mc_pkt);
	ENET_EVCNT_ATTACH(r_crc_align);
	ENET_EVCNT_ATTACH(r_undersize);
	ENET_EVCNT_ATTACH(r_oversize);
	ENET_EVCNT_ATTACH(r_frag);
	ENET_EVCNT_ATTACH(r_jab);
	ENET_EVCNT_ATTACH(r_p64);
	ENET_EVCNT_ATTACH(r_p65to127);
	ENET_EVCNT_ATTACH(r_p128to255);
	ENET_EVCNT_ATTACH(r_p256to511);
	ENET_EVCNT_ATTACH(r_p512to1023);
	ENET_EVCNT_ATTACH(r_p1024to2047);
	ENET_EVCNT_ATTACH(r_p_gte2048);
	ENET_EVCNT_ATTACH(r_octets);
}

/*
 * Accumulate the hardware RMON counters into the event counters.
 * Caller (enet_tick) clears the MIB block right afterwards, so each
 * read here is a delta since the previous tick.
 */
static void
enet_update_evcnt(struct enet_softc *sc)
{
	sc->sc_ev_t_drop.ev_count += ENET_REG_READ(sc, ENET_RMON_T_DROP);
	sc->sc_ev_t_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_PACKETS);
	sc->sc_ev_t_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_BC_PKT);
	sc->sc_ev_t_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_T_MC_PKT);
	sc->sc_ev_t_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_T_CRC_ALIGN);
	sc->sc_ev_t_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_UNDERSIZE);
	sc->sc_ev_t_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OVERSIZE);
	sc->sc_ev_t_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_T_FRAG);
	sc->sc_ev_t_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_T_JAB);
	sc->sc_ev_t_col.ev_count += ENET_REG_READ(sc, ENET_RMON_T_COL);
	sc->sc_ev_t_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P64);
	sc->sc_ev_t_p65to127n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P65TO127N);
	sc->sc_ev_t_p128to255n.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P128TO255N);
	sc->sc_ev_t_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P256TO511);
	sc->sc_ev_t_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P512TO1023);
	sc->sc_ev_t_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P1024TO2047);
	sc->sc_ev_t_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_T_P_GTE2048);
	sc->sc_ev_t_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_T_OCTETS);
	sc->sc_ev_r_packets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_PACKETS);
	sc->sc_ev_r_bc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_BC_PKT);
	sc->sc_ev_r_mc_pkt.ev_count += ENET_REG_READ(sc, ENET_RMON_R_MC_PKT);
	sc->sc_ev_r_crc_align.ev_count += ENET_REG_READ(sc, ENET_RMON_R_CRC_ALIGN);
	sc->sc_ev_r_undersize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE);
	sc->sc_ev_r_oversize.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OVERSIZE);
	sc->sc_ev_r_frag.ev_count += ENET_REG_READ(sc, ENET_RMON_R_FRAG);
	sc->sc_ev_r_jab.ev_count += ENET_REG_READ(sc, ENET_RMON_R_JAB);
	sc->sc_ev_r_p64.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P64);
	sc->sc_ev_r_p65to127.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P65TO127);
	sc->sc_ev_r_p128to255.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P128TO255);
	sc->sc_ev_r_p256to511.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P256TO511);
	sc->sc_ev_r_p512to1023.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P512TO1023);
	sc->sc_ev_r_p1024to2047.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P1024TO2047);
	sc->sc_ev_r_p_gte2048.ev_count += ENET_REG_READ(sc, ENET_RMON_R_P_GTE2048);
	sc->sc_ev_r_octets.ev_count += ENET_REG_READ(sc, ENET_RMON_R_OCTETS);
}
#endif /* ENET_EVENT_COUNTER */

/*
 * Periodic callout (every ENET_TICK == hz ticks): harvests the hardware
 * statistics counters, clears the MIB block, and runs mii_tick().
 * Reschedules itself unless enet_stop() has set sc_stopping.
 */
static void
enet_tick(void *arg)
{
	struct enet_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	int s;

	sc = arg;
	mii = &sc->sc_mii;
	ifp = &sc->sc_ethercom.ec_if;

	s = splnet();

	if (sc->sc_stopping)
		goto out;

#ifdef ENET_EVENT_COUNTER
	enet_update_evcnt(sc);
#endif

	/* update counters */
	if_statadd(ifp, if_ierrors,
	    (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_UNDERSIZE) +
	    (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_FRAG) +
	    (uint64_t)ENET_REG_READ(sc, ENET_RMON_R_JAB));

	/* clear counters */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	mii_tick(mii);
 out:

	if (!sc->sc_stopping)
		callout_schedule(&sc->sc_tick_ch, ENET_TICK);

	splx(s);
}

/*
 * Main interrupt handler: reads ENET_EIR and dispatches to the TX/RX
 * service routines.  The i.MX7 variant has three descriptor queues,
 * hence the additional TXF1/TXF2 and RXF1/RXF2 bits.  A bus error
 * (EBERR) triggers a full stop/reinit instead of a plain acknowledge.
 * Always returns 1 (interrupt claimed).
 */
int
enet_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = arg;
	status = ENET_REG_READ(sc, ENET_EIR);

	if (sc->sc_imxtype == 7) {
		if (status & (ENET_EIR_TXF | ENET_EIR_TXF1 | ENET_EIR_TXF2))
			enet_tx_intr(arg);
		if (status & (ENET_EIR_RXF | ENET_EIR_RXF1 | ENET_EIR_RXF2))
			enet_rx_intr(arg);
	} else {
		if (status & ENET_EIR_TXF)
			enet_tx_intr(arg);
		if (status & ENET_EIR_RXF)
			enet_rx_intr(arg);
	}

	if (status & ENET_EIR_EBERR) {
		/*
		 * Bus error: the controller state is suspect; recover by
		 * full reset rather than acknowledging the interrupt.
		 */
		device_printf(sc->sc_dev, "Ethernet Bus Error\n");
		ifp = &sc->sc_ethercom.ec_if;
		enet_stop(ifp, 1);
		enet_init(ifp);
	} else {
		/* write-1-to-clear the handled interrupt bits */
		ENET_REG_WRITE(sc, ENET_EIR, status);
	}

	rnd_add_uint32(&sc->sc_rnd_source, status);

	return 1;
}

/*
 * TX completion: walk the descriptor ring from the consumer index up to
 * the producer index, reclaiming descriptors the hardware has released
 * (TXFLAGS1_R clear).  DMA maps/mbufs are freed on the first segment of
 * each packet (T1); errors are checked on the last segment (L).
 */
static int
enet_tx_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	struct enet_txsoft *txs;
	int idx;

	sc = (struct enet_softc *)arg;
	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		txs = &sc->sc_txsoft[idx];

		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_R) {
			/* This TX Descriptor has not been transmitted yet */
			break;
		}

		/* txsoft is available on first segment (TXFLAGS1_T1) */
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			if_statinc(ifp, if_opackets);
		}

		/* checking error */
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_L) {
			uint32_t flags2;

			flags2 = sc->sc_txdesc_ring[idx].tx_flags2;

			if (flags2 & (TXFLAGS2_TXE |
			    TXFLAGS2_UE | TXFLAGS2_EE | TXFLAGS2_FE |
			    TXFLAGS2_LCE | TXFLAGS2_OE | TXFLAGS2_TSE)) {
#ifdef DEBUG_ENET
				if (enet_debug) {
					char flagsbuf[128];

					snprintb(flagsbuf, sizeof(flagsbuf),
					    "\20" "\20TRANSMIT" "\16UNDERFLOW"
					    "\15COLLISION" "\14FRAME"
					    "\13LATECOLLISION" "\12OVERFLOW",
					    flags2);

					device_printf(sc->sc_dev,
					    "txdesc[%d]: transmit error: "
					    "flags2=%s\n", idx, flagsbuf);
				}
#endif /* DEBUG_ENET */
				if_statinc(ifp, if_oerrors);
			}
		}

		sc->sc_tx_free++;
	}
	sc->sc_tx_considx = idx;

	/* at least one descriptor reclaimed: transmit can proceed again */
	if (sc->sc_tx_free > 0)
		sc->sc_txbusy = false;

	/*
	 * No more pending TX descriptor,
	 * cancel the watchdog timer.
	 */
	if (sc->sc_tx_free == ENET_TX_RING_CNT)
		ifp->if_timer = 0;

	return 1;
}

/*
 * RX completion: walk the ring from sc_rx_readidx, chaining multi-
 * descriptor frames into a single mbuf chain (m0/mprev).  The last
 * descriptor of a frame carries RXFLAGS1_L plus the error summary
 * bits; good frames are handed to the network stack via if_percpuq.
 * Every consumed descriptor gets a fresh mbuf via enet_alloc_rxbuf().
 */
static int
enet_rx_intr(void *arg)
{
	struct enet_softc *sc;
	struct ifnet *ifp;
	struct enet_rxsoft *rxs;
	int idx, len, amount;
	uint32_t flags1, flags2;
	struct mbuf *m, *m0, *mprev;

	sc = arg;
	ifp = &sc->sc_ethercom.ec_if;

	m0 = mprev = NULL;
	amount = 0;
	for (idx = sc->sc_rx_readidx; ; idx = ENET_RX_NEXTIDX(idx)) {

		rxs = &sc->sc_rxsoft[idx];

		RXDESC_READIN(idx);
		if (sc->sc_rxdesc_ring[idx].rx_flags1_len & RXFLAGS1_E) {
			/* This RX Descriptor has not been received yet */
			break;
		}

		/*
		 * build mbuf from RX Descriptor if needed
		 */
		m = rxs->rxs_mbuf;
		rxs->rxs_mbuf = NULL;

		flags1 = sc->sc_rxdesc_ring[idx].rx_flags1_len;
		len = RXFLAGS1_LEN(flags1);

/*
 * The controller prepends 2 bytes of padding to the frame so the IP
 * header ends up 4-byte aligned — presumably the RACC[SHIFT16]
 * feature; confirm against if_enetreg.h.
 */
#define RACC_SHIFT16	2
		if (m0 == NULL) {
			/* first descriptor of a frame: strip the pad */
			m0 = m;
			m_adj(m0, RACC_SHIFT16);
			len -= RACC_SHIFT16;
			m->m_len = len;
			amount = len;
		} else {
			/*
			 * On the last descriptor the hardware reports the
			 * total frame length, not the segment length, so
			 * compute this segment's share.
			 */
			if (flags1 & RXFLAGS1_L)
				len = len - amount - RACC_SHIFT16;

			m->m_len = len;
			amount += len;
			if (m->m_flags & M_PKTHDR)
				m_remove_pkthdr(m);
			mprev->m_next = m;
		}
		mprev = m;

		flags2 = sc->sc_rxdesc_ring[idx].rx_flags2;

		if (flags1 & RXFLAGS1_L) {
			/* last buffer */
			if ((amount < ETHER_HDR_LEN) ||
			    ((flags1 & (RXFLAGS1_LG | RXFLAGS1_NO |
			    RXFLAGS1_CR | RXFLAGS1_OV | RXFLAGS1_TR)) ||
			    (flags2 & (RXFLAGS2_ME | RXFLAGS2_PE |
			    RXFLAGS2_CE)))) {

#ifdef DEBUG_ENET
				if (enet_debug) {
					char flags1buf[128], flags2buf[128];
					snprintb(flags1buf, sizeof(flags1buf),
					    "\20" "\31MISS" "\26LENGTHVIOLATION"
					    "\25NONOCTET" "\23CRC" "\22OVERRUN"
					    "\21TRUNCATED", flags1);
					snprintb(flags2buf, sizeof(flags2buf),
					    "\20" "\40MAC" "\33PHY"
					    "\32COLLISION", flags2);

					DEVICE_DPRINTF(
					    "rxdesc[%d]: receive error: "
					    "flags1=%s,flags2=%s,len=%d\n",
					    idx, flags1buf, flags2buf, amount);
				}
#endif /* DEBUG_ENET */
				if_statinc(ifp, if_ierrors);
				m_freem(m0);

			} else {
				/* packet receive ok */
				m_set_rcvif(m0, ifp);
				m0->m_pkthdr.len = amount;

				bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);

				if (ifp->if_csum_flags_rx & (M_CSUM_IPv4 |
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6))
					enet_rx_csum(sc, ifp, m0, idx);

				if_percpuq_enqueue(ifp->if_percpuq, m0);
			}

			m0 = NULL;
			mprev = NULL;
			amount = 0;

		} else {
			/* continued from previous buffer */
bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 620 1.1 ryo rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 621 1.1 ryo } 622 1.1 ryo 623 1.1 ryo bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 624 1.1 ryo if (enet_alloc_rxbuf(sc, idx) != 0) { 625 1.1 ryo panic("enet_alloc_rxbuf NULL\n"); 626 1.1 ryo } 627 1.1 ryo } 628 1.1 ryo sc->sc_rx_readidx = idx; 629 1.1 ryo 630 1.1 ryo /* re-enable RX DMA to make sure */ 631 1.1 ryo ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE); 632 1.1 ryo 633 1.1 ryo return 1; 634 1.1 ryo } 635 1.1 ryo 636 1.1 ryo static void 637 1.1 ryo enet_rx_csum(struct enet_softc *sc, struct ifnet *ifp, struct mbuf *m, int idx) 638 1.1 ryo { 639 1.1 ryo uint32_t flags2; 640 1.1 ryo uint8_t proto; 641 1.1 ryo 642 1.1 ryo flags2 = sc->sc_rxdesc_ring[idx].rx_flags2; 643 1.1 ryo 644 1.1 ryo if (flags2 & RXFLAGS2_IPV6) { 645 1.1 ryo proto = sc->sc_rxdesc_ring[idx].rx_proto; 646 1.1 ryo 647 1.1 ryo /* RXFLAGS2_PCR is valid when IPv6 and TCP/UDP */ 648 1.1 ryo if ((proto == IPPROTO_TCP) && 649 1.1 ryo (ifp->if_csum_flags_rx & M_CSUM_TCPv6)) 650 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCPv6; 651 1.1 ryo else if ((proto == IPPROTO_UDP) && 652 1.1 ryo (ifp->if_csum_flags_rx & M_CSUM_UDPv6)) 653 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_UDPv6; 654 1.1 ryo else 655 1.1 ryo return; 656 1.1 ryo 657 1.1 ryo /* IPv6 protocol checksum error */ 658 1.1 ryo if (flags2 & RXFLAGS2_PCR) 659 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 660 1.1 ryo 661 1.1 ryo } else { 662 1.1 ryo struct ether_header *eh; 663 1.1 ryo uint8_t *ip; 664 1.1 ryo 665 1.1 ryo eh = mtod(m, struct ether_header *); 666 1.1 ryo 667 1.1 ryo /* XXX: is an IPv4? 
*/ 668 1.1 ryo if (ntohs(eh->ether_type) != ETHERTYPE_IP) 669 1.1 ryo return; 670 1.1 ryo ip = (uint8_t *)(eh + 1); 671 1.1 ryo if ((ip[0] & 0xf0) == 0x40) 672 1.1 ryo return; 673 1.1 ryo 674 1.1 ryo proto = sc->sc_rxdesc_ring[idx].rx_proto; 675 1.1 ryo if (flags2 & RXFLAGS2_ICE) { 676 1.1 ryo if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 677 1.1 ryo m->m_pkthdr.csum_flags |= 678 1.1 ryo M_CSUM_IPv4 | M_CSUM_IPv4_BAD; 679 1.1 ryo } 680 1.1 ryo } else { 681 1.1 ryo if (ifp->if_csum_flags_rx & M_CSUM_IPv4) { 682 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 683 1.1 ryo } 684 1.1 ryo 685 1.1 ryo /* 686 1.1 ryo * PCR is valid when 687 1.1 ryo * ICE == 0 and FRAG == 0 688 1.1 ryo */ 689 1.1 ryo if (flags2 & RXFLAGS2_FRAG) 690 1.1 ryo return; 691 1.1 ryo 692 1.1 ryo /* 693 1.1 ryo * PCR is valid when proto is TCP or UDP 694 1.1 ryo */ 695 1.1 ryo if ((proto == IPPROTO_TCP) && 696 1.1 ryo (ifp->if_csum_flags_rx & M_CSUM_TCPv4)) 697 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 698 1.1 ryo else if ((proto == IPPROTO_UDP) && 699 1.1 ryo (ifp->if_csum_flags_rx & M_CSUM_UDPv4)) 700 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 701 1.1 ryo else 702 1.1 ryo return; 703 1.1 ryo 704 1.1 ryo /* IPv4 protocol cksum error */ 705 1.1 ryo if (flags2 & RXFLAGS2_PCR) 706 1.1 ryo m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD; 707 1.1 ryo } 708 1.1 ryo } 709 1.1 ryo } 710 1.1 ryo 711 1.1 ryo static void 712 1.1 ryo enet_setmulti(struct enet_softc *sc) 713 1.1 ryo { 714 1.20 msaitoh struct ethercom *ec = &sc->sc_ethercom; 715 1.20 msaitoh struct ifnet *ifp = &ec->ec_if; 716 1.1 ryo struct ether_multi *enm; 717 1.1 ryo struct ether_multistep step; 718 1.29 ryo uint32_t crc, hashidx; 719 1.1 ryo uint32_t gaddr[2]; 720 1.1 ryo 721 1.29 ryo if (ifp->if_flags & IFF_PROMISC) { 722 1.29 ryo /* receive all unicast packet */ 723 1.29 ryo ENET_REG_WRITE(sc, ENET_IAUR, 0xffffffff); 724 1.29 ryo ENET_REG_WRITE(sc, ENET_IALR, 0xffffffff); 725 1.29 ryo /* receive all multicast packet */ 726 1.1 ryo 
gaddr[0] = gaddr[1] = 0xffffffff; 727 1.1 ryo } else { 728 1.1 ryo gaddr[0] = gaddr[1] = 0; 729 1.1 ryo 730 1.23 msaitoh ETHER_LOCK(ec); 731 1.20 msaitoh ETHER_FIRST_MULTI(step, ec, enm); 732 1.1 ryo while (enm != NULL) { 733 1.29 ryo if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 734 1.29 ryo ETHER_ADDR_LEN)) { 735 1.29 ryo /* 736 1.29 ryo * if specified by range, give up setting hash, 737 1.29 ryo * and fallback to allmulti. 738 1.29 ryo */ 739 1.29 ryo gaddr[0] = gaddr[1] = 0xffffffff; 740 1.29 ryo break; 741 1.29 ryo } 742 1.29 ryo 743 1.1 ryo crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 744 1.29 ryo hashidx = __SHIFTOUT(crc, __BITS(30,26)); 745 1.29 ryo gaddr[__SHIFTOUT(crc, __BIT(31))] |= __BIT(hashidx); 746 1.29 ryo 747 1.1 ryo ETHER_NEXT_MULTI(step, enm); 748 1.1 ryo } 749 1.23 msaitoh ETHER_UNLOCK(ec); 750 1.1 ryo 751 1.29 ryo /* dont't receive any unicast packet (except own address) */ 752 1.1 ryo ENET_REG_WRITE(sc, ENET_IAUR, 0); 753 1.1 ryo ENET_REG_WRITE(sc, ENET_IALR, 0); 754 1.1 ryo } 755 1.29 ryo 756 1.29 ryo if (gaddr[0] == 0xffffffff && gaddr[1] == 0xffffffff) 757 1.29 ryo ifp->if_flags |= IFF_ALLMULTI; 758 1.29 ryo else 759 1.29 ryo ifp->if_flags &= ~IFF_ALLMULTI; 760 1.29 ryo 761 1.29 ryo /* receive multicast packets according to multicast filter */ 762 1.29 ryo ENET_REG_WRITE(sc, ENET_GAUR, gaddr[1]); 763 1.29 ryo ENET_REG_WRITE(sc, ENET_GALR, gaddr[0]); 764 1.29 ryo 765 1.1 ryo } 766 1.1 ryo 767 1.1 ryo static void 768 1.1 ryo enet_gethwaddr(struct enet_softc *sc, uint8_t *hwaddr) 769 1.1 ryo { 770 1.1 ryo uint32_t paddr; 771 1.1 ryo 772 1.1 ryo paddr = ENET_REG_READ(sc, ENET_PALR); 773 1.1 ryo hwaddr[0] = paddr >> 24; 774 1.1 ryo hwaddr[1] = paddr >> 16; 775 1.1 ryo hwaddr[2] = paddr >> 8; 776 1.1 ryo hwaddr[3] = paddr; 777 1.1 ryo 778 1.1 ryo paddr = ENET_REG_READ(sc, ENET_PAUR); 779 1.1 ryo hwaddr[4] = paddr >> 24; 780 1.1 ryo hwaddr[5] = paddr >> 16; 781 1.1 ryo } 782 1.1 ryo 783 1.1 ryo static void 784 1.1 ryo 
enet_sethwaddr(struct enet_softc *sc, uint8_t *hwaddr)
{
	uint32_t paddr;

	/* PALR holds address bytes 0-3, PAUR's upper half bytes 4-5 */
	paddr = (hwaddr[0] << 24) | (hwaddr[1] << 16) | (hwaddr[2] << 8) |
	    hwaddr[3];
	ENET_REG_WRITE(sc, ENET_PALR, paddr);
	paddr = (hwaddr[4] << 24) | (hwaddr[5] << 16);
	ENET_REG_WRITE(sc, ENET_PAUR, paddr);
}

/*
 * ifnet interfaces
 */

/*
 * if_init: bring the interface up.  Reinitializes registers and both
 * descriptor rings, reprograms MAC address and multicast filter, kicks
 * RX DMA and starts the statistics callout.  Returns 0 or an errno from
 * RX-ring buffer allocation.
 */
static int
enet_init(struct ifnet *ifp)
{
	struct enet_softc *sc;
	int s, error;

	sc = ifp->if_softc;

	s = splnet();

	enet_init_regs(sc, 0);
	enet_init_txring(sc);
	error = enet_init_rxring(sc);
	if (error != 0) {
		enet_drain_rxbuf(sc);
		device_printf(sc->sc_dev, "Cannot allocate mbuf cluster\n");
		goto init_failure;
	}

	/* reload mac address */
	memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
	enet_sethwaddr(sc, sc->sc_enaddr);

	/* program multicast address */
	enet_setmulti(sc);

	/* update if_flags */
	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	/* update local copy of if_flags */
	sc->sc_if_flags = ifp->if_flags;

	/* mii */
	mii_mediachg(&sc->sc_mii);

	/* enable RX DMA */
	ENET_REG_WRITE(sc, ENET_RDAR, ENET_RDAR_ACTIVE);

	sc->sc_stopping = false;
	callout_schedule(&sc->sc_tick_ch, ENET_TICK);

init_failure:
	splx(s);

	return error;
}

/*
 * if_start: drain the send queue into the TX descriptor ring until the
 * queue is empty or the ring fills (sc_txbusy).  TX DMA is kicked once
 * at the end if anything was queued, and the watchdog armed at 5s.
 */
static void
enet_start(struct ifnet *ifp)
{
	struct enet_softc *sc;
	struct mbuf *m;
	int npkt;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc = ifp->if_softc;
	for (npkt = 0; !sc->sc_txbusy; npkt++) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		if (sc->sc_tx_free <= 0) {
			/* no tx descriptor now... */
			sc->sc_txbusy = true;
			DEVICE_DPRINTF("TX descriptor is full\n");
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		if (enet_encap_txring(sc, &m) != 0) {
			/* too many mbuf chains? */
			sc->sc_txbusy = true;
			DEVICE_DPRINTF(
			    "TX descriptor is full. dropping packet\n");
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			break;
		}

		/* Pass the packet to any BPF listeners */
		bpf_mtap(ifp, m, BPF_D_OUT);
	}

	if (npkt) {
		/* enable TX DMA */
		ENET_REG_WRITE(sc, ENET_TDAR, ENET_TDAR_ACTIVE);

		ifp->if_timer = 5;
	}
}

/*
 * if_stop: halt the controller by clearing ECR[ETHEREN], stop the
 * statistics callout, and (when disable != 0) release all TX/RX
 * buffers.
 */
static void
enet_stop(struct ifnet *ifp, int disable)
{
	struct enet_softc *sc;
	int s;
	uint32_t v;

	sc = ifp->if_softc;

	s = splnet();

	sc->sc_stopping = true;
	callout_stop(&sc->sc_tick_ch);

	/* clear ENET_ECR[ETHEREN] to abort receive and transmit */
	v = ENET_REG_READ(sc, ENET_ECR);
	ENET_REG_WRITE(sc, ENET_ECR, v & ~ENET_ECR_ETHEREN);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;
	sc->sc_txbusy = false;

	if (disable) {
		enet_drain_txbuf(sc);
		enet_drain_rxbuf(sc);
	}

	splx(s);
}

/*
 * if_watchdog: TX timed out (if_timer armed by enet_start expired).
 * Salvage whatever completed, then reset via stop/init.
 */
static void
enet_watchdog(struct ifnet *ifp)
{
	struct enet_softc *sc;
	int s;

	sc = ifp->if_softc;
	s = splnet();

	device_printf(sc->sc_dev, "watchdog timeout\n");
	if_statinc(ifp, if_oerrors);

	/* salvage packets left in descriptors */
	enet_tx_intr(sc);
	enet_rx_intr(sc);

	/* reset */
	enet_stop(ifp, 1);
	enet_init(ifp);

	splx(s);
}

/*
 * ifmedia status callback: report PHY media status, merging in the
 * driver's flow-control flags.
 */
static void
enet_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct enet_softc *sc = ifp->if_softc;

	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
	    | sc->sc_flowflags;
}

/*
 * if_flags change callback: only PROMISC/ALLMULTI transitions can be
 * handled by reprogramming the filters; anything else forces a full
 * reinit (ENETRESET).
 */
static int
enet_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct enet_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & (IFF_PROMISC | IFF_ALLMULTI)) == 0)
		return 0;

	enet_setmulti(sc);

	sc->sc_if_flags = ifp->if_flags;
	return 0;
}

static int
enet_ioctl(struct ifnet *ifp, u_long command, void *data)
{
	struct enet_softc *sc;
	struct
ifreq *ifr; 980 1.1 ryo int s, error; 981 1.1 ryo uint32_t v; 982 1.1 ryo 983 1.1 ryo sc = ifp->if_softc; 984 1.1 ryo ifr = data; 985 1.1 ryo 986 1.1 ryo error = 0; 987 1.1 ryo 988 1.1 ryo s = splnet(); 989 1.1 ryo 990 1.1 ryo switch (command) { 991 1.1 ryo case SIOCSIFMTU: 992 1.1 ryo if (MTU2FRAMESIZE(ifr->ifr_mtu) > ENET_MAX_PKT_LEN) { 993 1.1 ryo error = EINVAL; 994 1.1 ryo } else { 995 1.1 ryo ifp->if_mtu = ifr->ifr_mtu; 996 1.1 ryo 997 1.1 ryo /* set maximum frame length */ 998 1.1 ryo v = MTU2FRAMESIZE(ifr->ifr_mtu); 999 1.1 ryo ENET_REG_WRITE(sc, ENET_FTRL, v); 1000 1.1 ryo v = ENET_REG_READ(sc, ENET_RCR); 1001 1.1 ryo v &= ~ENET_RCR_MAX_FL(0x3fff); 1002 1.11 ryo v |= ENET_RCR_MAX_FL(ifp->if_mtu + ETHER_HDR_LEN + 1003 1.11 ryo ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); 1004 1.1 ryo ENET_REG_WRITE(sc, ENET_RCR, v); 1005 1.1 ryo } 1006 1.1 ryo break; 1007 1.1 ryo case SIOCSIFMEDIA: 1008 1.1 ryo /* Flow control requires full-duplex mode. */ 1009 1.1 ryo if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1010 1.1 ryo (ifr->ifr_media & IFM_FDX) == 0) 1011 1.1 ryo ifr->ifr_media &= ~IFM_ETH_FMASK; 1012 1.1 ryo if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1013 1.1 ryo if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1014 1.1 ryo /* We can do both TXPAUSE and RXPAUSE. 
*/ 1015 1.1 ryo ifr->ifr_media |= 1016 1.1 ryo IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1017 1.1 ryo } 1018 1.1 ryo sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1019 1.1 ryo } 1020 1.1 ryo error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command); 1021 1.1 ryo break; 1022 1.1 ryo default: 1023 1.1 ryo error = ether_ioctl(ifp, command, data); 1024 1.1 ryo if (error != ENETRESET) 1025 1.1 ryo break; 1026 1.1 ryo 1027 1.1 ryo /* post-process */ 1028 1.1 ryo error = 0; 1029 1.1 ryo switch (command) { 1030 1.1 ryo case SIOCSIFCAP: 1031 1.34 riastrad error = if_init(ifp); 1032 1.1 ryo break; 1033 1.1 ryo case SIOCADDMULTI: 1034 1.1 ryo case SIOCDELMULTI: 1035 1.1 ryo if (ifp->if_flags & IFF_RUNNING) 1036 1.1 ryo enet_setmulti(sc); 1037 1.1 ryo break; 1038 1.1 ryo } 1039 1.1 ryo break; 1040 1.1 ryo } 1041 1.1 ryo 1042 1.1 ryo splx(s); 1043 1.1 ryo 1044 1.1 ryo return error; 1045 1.1 ryo } 1046 1.1 ryo 1047 1.1 ryo /* 1048 1.1 ryo * for MII 1049 1.1 ryo */ 1050 1.1 ryo static int 1051 1.17 msaitoh enet_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 1052 1.1 ryo { 1053 1.1 ryo struct enet_softc *sc; 1054 1.1 ryo int timeout; 1055 1.17 msaitoh uint32_t status; 1056 1.1 ryo 1057 1.1 ryo sc = device_private(dev); 1058 1.1 ryo 1059 1.1 ryo /* clear MII update */ 1060 1.1 ryo ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1061 1.1 ryo 1062 1.1 ryo /* read command */ 1063 1.1 ryo ENET_REG_WRITE(sc, ENET_MMFR, 1064 1.1 ryo ENET_MMFR_ST | ENET_MMFR_OP_READ | ENET_MMFR_TA | 1065 1.1 ryo ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy)); 1066 1.1 ryo 1067 1.1 ryo /* check MII update */ 1068 1.1 ryo for (timeout = 5000; timeout > 0; --timeout) { 1069 1.1 ryo status = ENET_REG_READ(sc, ENET_EIR); 1070 1.1 ryo if (status & ENET_EIR_MII) 1071 1.1 ryo break; 1072 1.1 ryo } 1073 1.1 ryo if (timeout <= 0) { 1074 1.1 ryo DEVICE_DPRINTF("MII read timeout: reg=0x%02x\n", 1075 1.1 ryo reg); 1076 1.17 msaitoh return ETIMEDOUT; 1077 1.17 msaitoh } else 1078 1.17 msaitoh *val = 
ENET_REG_READ(sc, ENET_MMFR) & ENET_MMFR_DATAMASK; 1079 1.1 ryo 1080 1.17 msaitoh return 0; 1081 1.1 ryo } 1082 1.1 ryo 1083 1.17 msaitoh static int 1084 1.17 msaitoh enet_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 1085 1.1 ryo { 1086 1.1 ryo struct enet_softc *sc; 1087 1.1 ryo int timeout; 1088 1.1 ryo 1089 1.1 ryo sc = device_private(dev); 1090 1.1 ryo 1091 1.1 ryo /* clear MII update */ 1092 1.1 ryo ENET_REG_WRITE(sc, ENET_EIR, ENET_EIR_MII); 1093 1.1 ryo 1094 1.1 ryo /* write command */ 1095 1.1 ryo ENET_REG_WRITE(sc, ENET_MMFR, 1096 1.1 ryo ENET_MMFR_ST | ENET_MMFR_OP_WRITE | ENET_MMFR_TA | 1097 1.1 ryo ENET_MMFR_PHY_REG(reg) | ENET_MMFR_PHY_ADDR(phy) | 1098 1.1 ryo (ENET_MMFR_DATAMASK & val)); 1099 1.1 ryo 1100 1.1 ryo /* check MII update */ 1101 1.1 ryo for (timeout = 5000; timeout > 0; --timeout) { 1102 1.1 ryo if (ENET_REG_READ(sc, ENET_EIR) & ENET_EIR_MII) 1103 1.1 ryo break; 1104 1.1 ryo } 1105 1.1 ryo if (timeout <= 0) { 1106 1.17 msaitoh DEVICE_DPRINTF("MII write timeout: reg=0x%02x\n", reg); 1107 1.17 msaitoh return ETIMEDOUT; 1108 1.1 ryo } 1109 1.17 msaitoh 1110 1.17 msaitoh return 0; 1111 1.1 ryo } 1112 1.1 ryo 1113 1.1 ryo static void 1114 1.1 ryo enet_miibus_statchg(struct ifnet *ifp) 1115 1.1 ryo { 1116 1.1 ryo struct enet_softc *sc; 1117 1.1 ryo struct mii_data *mii; 1118 1.1 ryo struct ifmedia_entry *ife; 1119 1.1 ryo uint32_t ecr, ecr0; 1120 1.1 ryo uint32_t rcr, rcr0; 1121 1.1 ryo uint32_t tcr, tcr0; 1122 1.1 ryo 1123 1.1 ryo sc = ifp->if_softc; 1124 1.1 ryo mii = &sc->sc_mii; 1125 1.1 ryo ife = mii->mii_media.ifm_cur; 1126 1.1 ryo 1127 1.1 ryo /* get current status */ 1128 1.1 ryo ecr0 = ecr = ENET_REG_READ(sc, ENET_ECR) & ~ENET_ECR_RESET; 1129 1.1 ryo rcr0 = rcr = ENET_REG_READ(sc, ENET_RCR); 1130 1.1 ryo tcr0 = tcr = ENET_REG_READ(sc, ENET_TCR); 1131 1.1 ryo 1132 1.1 ryo if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO && 1133 1.1 ryo (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) { 1134 1.1 
ryo sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK; 1135 1.1 ryo mii->mii_media_active &= ~IFM_ETH_FMASK; 1136 1.1 ryo } 1137 1.1 ryo 1138 1.18 msaitoh if ((ife->ifm_media & IFM_FDX) != 0) { 1139 1.1 ryo tcr |= ENET_TCR_FDEN; /* full duplex */ 1140 1.32 msaitoh rcr &= ~ENET_RCR_DRT; /* enable receive on transmit */ 1141 1.1 ryo } else { 1142 1.1 ryo tcr &= ~ENET_TCR_FDEN; /* half duplex */ 1143 1.1 ryo rcr |= ENET_RCR_DRT; /* disable receive on transmit */ 1144 1.1 ryo } 1145 1.1 ryo 1146 1.1 ryo if ((tcr ^ tcr0) & ENET_TCR_FDEN) { 1147 1.1 ryo /* 1148 1.1 ryo * need to reset because 1149 1.1 ryo * FDEN can change when ECR[ETHEREN] is 0 1150 1.1 ryo */ 1151 1.1 ryo enet_init_regs(sc, 0); 1152 1.1 ryo return; 1153 1.1 ryo } 1154 1.1 ryo 1155 1.1 ryo switch (IFM_SUBTYPE(ife->ifm_media)) { 1156 1.1 ryo case IFM_AUTO: 1157 1.1 ryo case IFM_1000_T: 1158 1.1 ryo ecr |= ENET_ECR_SPEED; /* 1000Mbps mode */ 1159 1.11 ryo rcr &= ~ENET_RCR_RMII_10T; 1160 1.1 ryo break; 1161 1.1 ryo case IFM_100_TX: 1162 1.1 ryo ecr &= ~ENET_ECR_SPEED; /* 100Mbps mode */ 1163 1.1 ryo rcr &= ~ENET_RCR_RMII_10T; /* 100Mbps mode */ 1164 1.1 ryo break; 1165 1.1 ryo case IFM_10_T: 1166 1.1 ryo ecr &= ~ENET_ECR_SPEED; /* 10Mbps mode */ 1167 1.1 ryo rcr |= ENET_RCR_RMII_10T; /* 10Mbps mode */ 1168 1.1 ryo break; 1169 1.1 ryo default: 1170 1.1 ryo ecr = ecr0; 1171 1.1 ryo rcr = rcr0; 1172 1.1 ryo tcr = tcr0; 1173 1.1 ryo break; 1174 1.1 ryo } 1175 1.1 ryo 1176 1.11 ryo if (sc->sc_rgmii == 0) 1177 1.11 ryo ecr &= ~ENET_ECR_SPEED; 1178 1.11 ryo 1179 1.1 ryo if (sc->sc_flowflags & IFM_FLOW) 1180 1.1 ryo rcr |= ENET_RCR_FCE; 1181 1.1 ryo else 1182 1.1 ryo rcr &= ~ENET_RCR_FCE; 1183 1.1 ryo 1184 1.1 ryo /* update registers if need change */ 1185 1.1 ryo if (ecr != ecr0) 1186 1.1 ryo ENET_REG_WRITE(sc, ENET_ECR, ecr); 1187 1.1 ryo if (rcr != rcr0) 1188 1.1 ryo ENET_REG_WRITE(sc, ENET_RCR, rcr); 1189 1.1 ryo if (tcr != tcr0) 1190 1.1 ryo ENET_REG_WRITE(sc, ENET_TCR, tcr); 1191 1.1 ryo } 1192 1.1 
/*
 * handling descriptors
 */

/*
 * enet_init_txring:
 *	Initialize all TX descriptors to an empty state (WRAP bit on the
 *	last slot) and reset the producer/consumer indices.
 */
static void
enet_init_txring(struct enet_softc *sc)
{
	int i;

	/* build TX ring */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		sc->sc_txdesc_ring[i].tx_flags1_len =
		    ((i == (ENET_TX_RING_CNT - 1)) ? TXFLAGS1_W : 0);
		sc->sc_txdesc_ring[i].tx_databuf = 0;
		sc->sc_txdesc_ring[i].tx_flags2 = TXFLAGS2_INT;
		sc->sc_txdesc_ring[i].tx__reserved1 = 0;
		sc->sc_txdesc_ring[i].tx_flags3 = 0;
		sc->sc_txdesc_ring[i].tx_1588timestamp = 0;
		sc->sc_txdesc_ring[i].tx__reserved2 = 0;
		sc->sc_txdesc_ring[i].tx__reserved3 = 0;

		TXDESC_WRITEOUT(i);
	}

	sc->sc_tx_free = ENET_TX_RING_CNT;
	sc->sc_tx_considx = 0;
	sc->sc_tx_prodidx = 0;
}

/*
 * enet_init_rxring:
 *	Attach an mbuf cluster to every RX slot.  Returns 0 or the errno
 *	from enet_alloc_rxbuf(); on failure the caller is expected to
 *	drain whatever was allocated (see enet_init()).
 */
static int
enet_init_rxring(struct enet_softc *sc)
{
	int i, error;

	/* build RX ring */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = enet_alloc_rxbuf(sc, i);
		if (error != 0)
			return error;
	}

	sc->sc_rx_readidx = 0;

	return 0;
}

/*
 * enet_alloc_rxbuf:
 *	Put a fresh mbuf cluster into RX slot 'idx', replacing (and
 *	freeing) any previous one, DMA-load and sync it, and rearm the
 *	descriptor.  Returns 0 or ENOBUFS / a bus_dma errno.
 */
static int
enet_alloc_rxbuf(struct enet_softc *sc, int idx)
{
	struct mbuf *m;
	int error;

	KASSERT((idx >= 0) && (idx < ENET_RX_RING_CNT));

	/* free mbuf if already allocated */
	if (sc->sc_rxsoft[idx].rxs_mbuf != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap);
		m_freem(sc->sc_rxsoft[idx].rxs_mbuf);
		sc->sc_rxsoft[idx].rxs_mbuf = NULL;
	}

	/* allocate new mbuf cluster */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLGET(m, M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return ENOBUFS;
	}
	m->m_len = MCLBYTES;
	m->m_next = NULL;

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return error;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxsoft[idx].rxs_dmamap, 0,
	    sc->sc_rxsoft[idx].rxs_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_rxsoft[idx].rxs_mbuf = m;
	enet_reset_rxdesc(sc, idx);
	return 0;
}

/*
 * enet_reset_rxdesc:
 *	Rewrite RX descriptor 'idx' to hand its (already-loaded) buffer
 *	back to the hardware: EMPTY bit set, WRAP on the last slot, all
 *	status fields cleared.
 */
static void
enet_reset_rxdesc(struct enet_softc *sc, int idx)
{
	uint32_t paddr;

	paddr = sc->sc_rxsoft[idx].rxs_dmamap->dm_segs[0].ds_addr;

	sc->sc_rxdesc_ring[idx].rx_flags1_len =
	    RXFLAGS1_E |
	    ((idx == (ENET_RX_RING_CNT - 1)) ? RXFLAGS1_W : 0);
	sc->sc_rxdesc_ring[idx].rx_databuf = paddr;
	sc->sc_rxdesc_ring[idx].rx_flags2 =
	    RXFLAGS2_INT;
	sc->sc_rxdesc_ring[idx].rx_hl = 0;
	sc->sc_rxdesc_ring[idx].rx_proto = 0;
	sc->sc_rxdesc_ring[idx].rx_cksum = 0;
	sc->sc_rxdesc_ring[idx].rx_flags3 = 0;
	sc->sc_rxdesc_ring[idx].rx_1588timestamp = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved2 = 0;
	sc->sc_rxdesc_ring[idx].rx__reserved3 = 0;

	RXDESC_WRITEOUT(idx);
}

/*
 * enet_drain_txbuf:
 *	Free every mbuf chain still pending between the consumer and
 *	producer indices (used on stop/reset), counting each as an
 *	output error.
 *
 *	NOTE(review): txs_mbuf is not set to NULL after m_freem() here —
 *	callers appear to rebuild the ring before reuse, but verify no
 *	path can see the stale pointer.
 */
static void
enet_drain_txbuf(struct enet_softc *sc)
{
	int idx;
	struct enet_txsoft *txs;
	struct ifnet *ifp;

	ifp = &sc->sc_ethercom.ec_if;

	for (idx = sc->sc_tx_considx; idx != sc->sc_tx_prodidx;
	    idx = ENET_TX_NEXTIDX(idx)) {

		/* txsoft[] is used only first segment */
		txs = &sc->sc_txsoft[idx];
		TXDESC_READIN(idx);
		if (sc->sc_txdesc_ring[idx].tx_flags1_len & TXFLAGS1_T1) {
			sc->sc_txdesc_ring[idx].tx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    txs->txs_dmamap);
			m_freem(txs->txs_mbuf);

			if_statinc(ifp, if_oerrors);
		}
		sc->sc_tx_free++;
	}
}

/*
 * enet_drain_rxbuf:
 *	Unload and free every mbuf cluster currently attached to the RX
 *	ring (used on stop/reset and on partial-init failure).
 */
static void
enet_drain_rxbuf(struct enet_softc *sc)
{
	int i;

	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_mbuf != NULL) {
			sc->sc_rxdesc_ring[i].rx_flags1_len = 0;
			bus_dmamap_unload(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
			m_freem(sc->sc_rxsoft[i].rxs_mbuf);
			sc->sc_rxsoft[i].rxs_mbuf = NULL;
		}
	}
}

/*
 * enet_alloc_ring:
 *	One-time (attach-time) allocation: per-slot TX/RX DMA maps plus
 *	the DMA-coherent descriptor rings themselves.
 *
 *	NOTE(review): if enet_alloc_dma() fails this returns -1 (not an
 *	errno) and the already-created TX/RX dmamaps are not destroyed —
 *	acceptable only as an attach-time failure; confirm callers treat
 *	it as fatal.
 */
static int
enet_alloc_ring(struct enet_softc *sc)
{
	int i, error;

	/*
	 * build DMA maps for TX.
	 * TX descriptor must be able to contain mbuf chains,
	 * so, make up ENET_MAX_PKT_NSEGS dmamap.
	 */
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, ENET_MAX_PKT_LEN,
		    ENET_MAX_PKT_NSEGS, ENET_MAX_PKT_LEN, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].txs_dmamap);

		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for TX descs\n");
			goto fail_1;
		}
	}

	/*
	 * build DMA maps for RX.
	 * RX descripter contains An mbuf cluster,
	 * and make up a dmamap.
	 */
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].rxs_dmamap);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create DMA map for RX descs\n");
			goto fail_2;
		}
	}

	if (enet_alloc_dma(sc, sizeof(struct enet_txdesc) * ENET_TX_RING_CNT,
	    (void **)&(sc->sc_txdesc_ring), &(sc->sc_txdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_txdesc_ring, 0,
	    sizeof(struct enet_txdesc) * ENET_TX_RING_CNT);

	if (enet_alloc_dma(sc, sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT,
	    (void **)&(sc->sc_rxdesc_ring), &(sc->sc_rxdesc_dmamap)) != 0)
		return -1;
	memset(sc->sc_rxdesc_ring, 0,
	    sizeof(struct enet_rxdesc) * ENET_RX_RING_CNT);

	return 0;

 fail_2:
	for (i = 0; i < ENET_RX_RING_CNT; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_1:
	for (i = 0; i < ENET_TX_RING_CNT; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	return error;
}

/*
 * enet_encap_mbufalign:
 *	Rearrange an outgoing mbuf chain so that every fragment's data
 *	pointer is 8-byte aligned and the first fragment holds at least
 *	MINBUFSIZE (10) bytes, as the ENET DMA engine requires.  May
 *	shift data within mbufs or splice in newly allocated mbufs;
 *	*mp is updated to the (possibly new) chain head.  Returns 0 or
 *	ENOBUFS; on error the chain (from the current position onward)
 *	has been freed and must not be touched by the caller.
 */
static int
enet_encap_mbufalign(struct mbuf **mp)
{
	struct mbuf *m, *m0, *mt, *p, *x;
	void *ap;
	uint32_t alignoff, chiplen;

	/*
	 * iMX6 SoC ethernet controller requires
	 * address of buffer must aligned 8, and
	 * length of buffer must be greater than 10 (first fragment only?)
	 */
#define ALIGNBYTE	8
#define MINBUFSIZE	10
#define ALIGN_PTR(p, align)	\
	(void *)(((uintptr_t)(p) + ((align) - 1)) & -(align))

	m0 = *mp;
	mt = p = NULL;
	for (m = m0; m != NULL; m = m->m_next) {
		alignoff = (uintptr_t)m->m_data & (ALIGNBYTE - 1);
		if (m->m_len < (ALIGNBYTE * 2)) {
			/*
			 * rearrange mbuf data aligned
			 *
			 *        align 8 *       *       *       *       *
			 *               +0123456789abcdef0123456789abcdef0
			 * FROM m->m_data[___________abcdefghijklmn_______]
			 *
			 *               +0123456789abcdef0123456789abcdef0
			 * TO   m->m_data[________abcdefghijklm___________] or
			 *      m->m_data[________________abcdefghijklmn__]
			 */
			if ((alignoff != 0) && (m->m_len != 0)) {
				chiplen = ALIGNBYTE - alignoff;
				if (M_LEADINGSPACE(m) >= alignoff) {
					ap = m->m_data - alignoff;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else if (M_TRAILINGSPACE(m) >= chiplen) {
					ap = m->m_data + chiplen;
					memmove(ap, m->m_data, m->m_len);
					m->m_data = ap;
				} else {
					/*
					 * no space to align data. (M_READONLY?)
					 * allocate new mbuf aligned,
					 * and copy to it.
					 */
					MGET(x, M_DONTWAIT, m->m_type);
					if (x == NULL) {
						m_freem(m);
						return ENOBUFS;
					}
					MCLAIM(x, m->m_owner);
					if (m->m_flags & M_PKTHDR)
						m_move_pkthdr(x, m);
					x->m_len = m->m_len;
					x->m_data = ALIGN_PTR(x->m_data,
					    ALIGNBYTE);
					memcpy(mtod(x, void *), mtod(m, void *),
					    m->m_len);
					p->m_next = x;
					x->m_next = m_free(m);
					m = x;
				}
			}

			/*
			 * fill 1st mbuf at least 10byte
			 *
			 *        align 8 *       *       *       *       *
			 *               +0123456789abcdef0123456789abcdef0
			 * FROM m->m_data[________abcde___________________]
			 *      m->m_data[__fg____________________________]
			 *      m->m_data[_________________hi_____________]
			 *      m->m_data[__________jk____________________]
			 *      m->m_data[____l___________________________]
			 *
			 *               +0123456789abcdef0123456789abcdef0
			 * TO   m->m_data[________abcdefghij______________]
			 *      m->m_data[________________________________]
			 *      m->m_data[________________________________]
			 *      m->m_data[___________k____________________]
			 *      m->m_data[____l___________________________]
			 */
			if (mt == NULL) {
				mt = m;
				while (mt->m_len == 0) {
					mt = mt->m_next;
					if (mt == NULL) {
						m_freem(m);
						return ENOBUFS;
					}
				}

				/* mt = 1st mbuf, x = 2nd mbuf */
				x = mt->m_next;
				while (mt->m_len < MINBUFSIZE) {
					if (x == NULL) {
						m_freem(m);
						return ENOBUFS;
					}

					alignoff = (uintptr_t)x->m_data &
					    (ALIGNBYTE - 1);
					chiplen = ALIGNBYTE - alignoff;
					if (chiplen > x->m_len) {
						chiplen = x->m_len;
					} else if ((mt->m_len + chiplen) <
					    MINBUFSIZE) {
						/*
						 * next mbuf should be greater
						 * than ALIGNBYTE?
						 */
						if (x->m_len >= (chiplen +
						    ALIGNBYTE * 2))
							chiplen += ALIGNBYTE;
						else
							chiplen = x->m_len;
					}

					if (chiplen &&
					    (M_TRAILINGSPACE(mt) < chiplen)) {
						/*
						 * move data to the beginning of
						 * m_dat[] (aligned) to en-
						 * large trailingspace
						 */
						ap = M_BUFADDR(mt);
						ap = ALIGN_PTR(ap, ALIGNBYTE);
						memcpy(ap, mt->m_data,
						    mt->m_len);
						mt->m_data = ap;
					}

					if (chiplen &&
					    (M_TRAILINGSPACE(mt) >= chiplen)) {
						memcpy(mt->m_data + mt->m_len,
						    x->m_data, chiplen);
						mt->m_len += chiplen;
						m_adj(x, chiplen);
					}

					x = x->m_next;
				}
			}

		} else {
			mt = m;

			/*
			 * allocate new mbuf x, and rearrange as below;
			 *
			 *        align 8 *       *       *       *       *
			 *               +0123456789abcdef0123456789abcdef0
			 * FROM m->m_data[____________abcdefghijklmnopq___]
			 *
			 *               +0123456789abcdef0123456789abcdef0
			 * TO   x->m_data[________abcdefghijkl____________]
			 *      m->m_data[________________________mnopq___]
			 *
			 */
			if (alignoff != 0) {
				/* at least ALIGNBYTE */
				chiplen = ALIGNBYTE - alignoff + ALIGNBYTE;

				MGET(x, M_DONTWAIT, m->m_type);
				if (x == NULL) {
					m_freem(m);
					return ENOBUFS;
				}
				MCLAIM(x, m->m_owner);
				if (m->m_flags & M_PKTHDR)
					m_move_pkthdr(x, m);
				x->m_data = ALIGN_PTR(x->m_data, ALIGNBYTE);
				memcpy(mtod(x, void *), mtod(m, void *),
				    chiplen);
				x->m_len = chiplen;
				x->m_next = m;
				m_adj(m, chiplen);

				if (p == NULL)
					m0 = x;
				else
					p->m_next = x;
			}
		}
		p = m;
	}
	*mp = m0;

	return 0;
}

/*
 * enet_encap_txring:
 *	Load one packet (mbuf chain) into consecutive TX descriptors
 *	starting at sc_tx_prodidx: align the chain for the DMA engine,
 *	map it, pre-zero the L4 checksum field when offload is requested,
 *	then fill one descriptor per DMA segment (READY bit written last,
 *	after the rest of the descriptor is synced out).  On any error
 *	the mbuf chain is freed here; the caller must not free it again.
 *	Returns 0, ENOBUFS, or a bus_dma errno.
 */
static int
enet_encap_txring(struct enet_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	struct mbuf *m;
	int csumflags, idx, i, error;
	uint32_t flags1, flags2;

	idx = sc->sc_tx_prodidx;
	map = sc->sc_txsoft[idx].txs_dmamap;

	/* align mbuf data for claim of ENET */
	error = enet_encap_mbufalign(mp);
	if (error != 0)
		return error;

	m = *mp;
	csumflags = m->m_pkthdr.csum_flags;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "Error mapping mbuf into TX chain: error=%d\n", error);
		m_freem(m);
		return error;
	}

	if (map->dm_nsegs > sc->sc_tx_free) {
		bus_dmamap_unload(sc->sc_dmat, map);
		device_printf(sc->sc_dev,
		    "too many mbuf chain %d\n", map->dm_nsegs);
		m_freem(m);
		return ENOBUFS;
	}

	/* fill protocol cksum zero beforehand */
	if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
	    M_CSUM_UDPv6 | M_CSUM_TCPv6)) {
		int ehlen;
		uint16_t etype;

		m_copydata(m, ETHER_ADDR_LEN * 2, sizeof(etype), &etype);
		switch (ntohs(etype)) {
		case ETHERTYPE_IP:
		case ETHERTYPE_IPV6:
			ehlen = ETHER_HDR_LEN;
			break;
		case ETHERTYPE_VLAN:
			ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
			break;
		default:
			ehlen = 0;
			break;
		}

		if (ehlen) {
			const int off =
			    M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data) +
			    M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);
			if (m->m_pkthdr.len >= ehlen + off + sizeof(uint16_t)) {
				uint16_t zero = 0;
				m_copyback(m, ehlen + off, sizeof(zero), &zero);
			}
		}
	}

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		flags1 = TXFLAGS1_R;
		flags2 = 0;

		if (i == 0) {
			flags1 |= TXFLAGS1_T1;	/* mark as first segment */
			sc->sc_txsoft[idx].txs_mbuf = m;
		}

		/* checksum offloading */
		if (csumflags & (M_CSUM_UDPv4 | M_CSUM_TCPv4 |
		    M_CSUM_UDPv6 | M_CSUM_TCPv6))
			flags2 |= TXFLAGS2_PINS;
		if (csumflags & (M_CSUM_IPv4))
			flags2 |= TXFLAGS2_IINS;

		if (i == map->dm_nsegs - 1) {
			/* mark last segment */
			flags1 |= TXFLAGS1_L | TXFLAGS1_TC;
			flags2 |= TXFLAGS2_INT;
		}
		if (idx == ENET_TX_RING_CNT - 1) {
			/* mark end of ring */
			flags1 |= TXFLAGS1_W;
		}

		/* write out everything but the READY word first... */
		sc->sc_txdesc_ring[idx].tx_databuf = map->dm_segs[i].ds_addr;
		sc->sc_txdesc_ring[idx].tx_flags2 = flags2;
		sc->sc_txdesc_ring[idx].tx_flags3 = 0;
		TXDESC_WRITEOUT(idx);

		/* ...then set READY so the hardware sees a complete desc */
		sc->sc_txdesc_ring[idx].tx_flags1_len =
		    flags1 | TXFLAGS1_LEN(map->dm_segs[i].ds_len);
		TXDESC_WRITEOUT(idx);

		idx = ENET_TX_NEXTIDX(idx);
		sc->sc_tx_free--;
	}

	sc->sc_tx_prodidx = idx;

	return 0;
}

/*
 * device initialize
 */

/*
 * enet_init_regs:
 *	Full register (re)initialization: reset the MAC, then program
 *	duplex, speed, MDC clock divider, FIFO thresholds, frame-length
 *	limits, descriptor ring base addresses and interrupt masks.
 *	With init != 0, defaults (full duplex, gigabit) are used instead
 *	of the current media selection.
 */
static int
enet_init_regs(struct enet_softc *sc, int init)
{
	struct mii_data *mii;
	struct ifmedia_entry *ife;
	paddr_t paddr;
	uint32_t val;
	int miimode, fulldup, ecr_speed, rcr_speed, flowctrl;

	if (init) {
		fulldup = 1;
		ecr_speed = ENET_ECR_SPEED;
		rcr_speed = 0;
		flowctrl = 0;
	} else {
		mii = &sc->sc_mii;
		ife = mii->mii_media.ifm_cur;

		if ((ife->ifm_media & IFM_FDX) != 0)
			fulldup = 1;
		else
			fulldup = 0;

		switch (IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			ecr_speed = 0;
			rcr_speed = ENET_RCR_RMII_10T;
			break;
		case IFM_100_TX:
			ecr_speed = 0;
			rcr_speed = 0;
			break;
		default:
			ecr_speed = ENET_ECR_SPEED;
			rcr_speed = 0;
			break;
		}

		flowctrl = sc->sc_flowflags & IFM_FLOW;
	}

	/* gigabit (ECR[SPEED]) only applies to RGMII attachments */
	if (sc->sc_rgmii == 0)
		ecr_speed = 0;

	/* reset */
	ENET_REG_WRITE(sc, ENET_ECR, ecr_speed | ENET_ECR_RESET);

	/* mask and clear all interrupt */
	ENET_REG_WRITE(sc, ENET_EIMR, 0);
	ENET_REG_WRITE(sc, ENET_EIR, 0xffffffff);

	/* full duplex */
	ENET_REG_WRITE(sc, ENET_TCR, fulldup ? ENET_TCR_FDEN : 0);

	/* clear and enable MIB register */
	ENET_REG_WRITE(sc, ENET_MIBC, ENET_MIBC_MIB_CLEAR);
	ENET_REG_WRITE(sc, ENET_MIBC, 0);

	/* MII speed setup. MDCclk(=2.5MHz) = (internal module clock)/((val+1)*2) */
	val = (sc->sc_clock + (5000000 - 1)) / 5000000 - 1;
	ENET_REG_WRITE(sc, ENET_MSCR, __SHIFTIN(val, ENET_MSCR_MII_SPEED));

	/* Opcode/Pause Duration */
	ENET_REG_WRITE(sc, ENET_OPD, 0x00010020);

	/* Receive FIFO */
	ENET_REG_WRITE(sc, ENET_RSFL, 16);	/* RxFIFO Section Full */
	ENET_REG_WRITE(sc, ENET_RSEM, 0x84);	/* RxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_RAEM, 8);	/* RxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_RAFL, 8);	/* RxFIFO Almost Full */

	/* Transmit FIFO */
	ENET_REG_WRITE(sc, ENET_TFWR, ENET_TFWR_STRFWD |
	    ENET_TFWR_FIFO(128));		/* TxFIFO Watermark */
	ENET_REG_WRITE(sc, ENET_TSEM, 0);	/* TxFIFO Section Empty */
	ENET_REG_WRITE(sc, ENET_TAEM, 256);	/* TxFIFO Almost Empty */
	ENET_REG_WRITE(sc, ENET_TAFL, 8);	/* TxFIFO Almost Full */
	ENET_REG_WRITE(sc, ENET_TIPG, 12);	/* Tx Inter-Packet Gap */

	/* hardware checksum is default off (override in TX descripter) */
	ENET_REG_WRITE(sc, ENET_TACC, 0);

	/*
	 * align ethernet payload on 32bit, discard frames with MAC layer error,
	 * and don't discard checksum error
	 */
	ENET_REG_WRITE(sc, ENET_RACC, ENET_RACC_SHIFT16 | ENET_RACC_LINEDIS);

	/* maximum frame size */
	val = ENET_DEFAULT_PKT_LEN;
	ENET_REG_WRITE(sc, ENET_FTRL, val);	/* Frame Truncation Length */

	if (sc->sc_rgmii == 0)
		miimode = ENET_RCR_RMII_MODE | ENET_RCR_MII_MODE;
	else
		miimode = ENET_RCR_RGMII_EN;
	ENET_REG_WRITE(sc, ENET_RCR,
	    ENET_RCR_PADEN |			/* RX frame padding remove */
	    miimode |
	    (flowctrl ? ENET_RCR_FCE : 0) |	/* flow control enable */
	    rcr_speed |
	    (fulldup ? 0 : ENET_RCR_DRT) |
	    ENET_RCR_MAX_FL(val));

	/* Maximum Receive BufSize per one descriptor */
	ENET_REG_WRITE(sc, ENET_MRBR, RXDESC_MAXBUFSIZE);


	/* TX/RX Descriptor Physical Address */
	paddr = sc->sc_txdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_TDSR, paddr);
	paddr = sc->sc_rxdesc_dmamap->dm_segs[0].ds_addr;
	ENET_REG_WRITE(sc, ENET_RDSR, paddr);
	/* sync cache */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_txdesc_dmamap, 0,
	    sc->sc_txdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxdesc_dmamap, 0,
	    sc->sc_rxdesc_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	/* enable interrupts (i.MX7 adds the ring-1/ring-2 sources) */
	val = ENET_EIR_TXF | ENET_EIR_RXF | ENET_EIR_EBERR;
	if (sc->sc_imxtype == 7)
		val |= ENET_EIR_TXF2 | ENET_EIR_RXF2 |
ENET_EIR_TXF1 | 1851 1.20 msaitoh ENET_EIR_RXF1; 1852 1.6 ryo ENET_REG_WRITE(sc, ENET_EIMR, val); 1853 1.1 ryo 1854 1.1 ryo /* enable ether */ 1855 1.1 ryo ENET_REG_WRITE(sc, ENET_ECR, 1856 1.1 ryo #if _BYTE_ORDER == _LITTLE_ENDIAN 1857 1.1 ryo ENET_ECR_DBSWP | 1858 1.1 ryo #endif 1859 1.11 ryo ecr_speed | 1860 1.1 ryo ENET_ECR_EN1588 | /* use enhanced TX/RX descriptor */ 1861 1.1 ryo ENET_ECR_ETHEREN); /* Ethernet Enable */ 1862 1.1 ryo 1863 1.1 ryo return 0; 1864 1.1 ryo } 1865 1.1 ryo 1866 1.1 ryo static int 1867 1.1 ryo enet_alloc_dma(struct enet_softc *sc, size_t size, void **addrp, 1868 1.21 msaitoh bus_dmamap_t *mapp) 1869 1.1 ryo { 1870 1.1 ryo bus_dma_segment_t seglist[1]; 1871 1.1 ryo int nsegs, error; 1872 1.1 ryo 1873 1.1 ryo if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seglist, 1874 1.2 ryo 1, &nsegs, M_NOWAIT)) != 0) { 1875 1.1 ryo device_printf(sc->sc_dev, 1876 1.1 ryo "unable to allocate DMA buffer, error=%d\n", error); 1877 1.1 ryo goto fail_alloc; 1878 1.1 ryo } 1879 1.1 ryo 1880 1.1 ryo if ((error = bus_dmamem_map(sc->sc_dmat, seglist, 1, size, addrp, 1881 1.1 ryo BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 1882 1.1 ryo device_printf(sc->sc_dev, 1883 1.1 ryo "unable to map DMA buffer, error=%d\n", 1884 1.1 ryo error); 1885 1.1 ryo goto fail_map; 1886 1.1 ryo } 1887 1.1 ryo 1888 1.1 ryo if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1889 1.1 ryo BUS_DMA_NOWAIT, mapp)) != 0) { 1890 1.1 ryo device_printf(sc->sc_dev, 1891 1.1 ryo "unable to create DMA map, error=%d\n", error); 1892 1.1 ryo goto fail_create; 1893 1.1 ryo } 1894 1.1 ryo 1895 1.1 ryo if ((error = bus_dmamap_load(sc->sc_dmat, *mapp, *addrp, size, NULL, 1896 1.1 ryo BUS_DMA_NOWAIT)) != 0) { 1897 1.1 ryo aprint_error_dev(sc->sc_dev, 1898 1.1 ryo "unable to load DMA map, error=%d\n", error); 1899 1.1 ryo goto fail_load; 1900 1.1 ryo } 1901 1.1 ryo 1902 1.1 ryo return 0; 1903 1.1 ryo 1904 1.1 ryo fail_load: 1905 1.1 ryo bus_dmamap_destroy(sc->sc_dmat, *mapp); 
1906 1.1 ryo fail_create: 1907 1.1 ryo bus_dmamem_unmap(sc->sc_dmat, *addrp, size); 1908 1.1 ryo fail_map: 1909 1.1 ryo bus_dmamem_free(sc->sc_dmat, seglist, 1); 1910 1.1 ryo fail_alloc: 1911 1.1 ryo return error; 1912 1.1 ryo } 1913