1 1.96 jakllsch /* $NetBSD: dwc_gmac.c,v 1.96 2025/02/16 18:54:49 jakllsch Exp $ */ 2 1.18 jmcneill 3 1.1 martin /*- 4 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc. 5 1.1 martin * All rights reserved. 6 1.1 martin * 7 1.1 martin * This code is derived from software contributed to The NetBSD Foundation 8 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann. 9 1.1 martin * 10 1.1 martin * Redistribution and use in source and binary forms, with or without 11 1.1 martin * modification, are permitted provided that the following conditions 12 1.1 martin * are met: 13 1.1 martin * 1. Redistributions of source code must retain the above copyright 14 1.1 martin * notice, this list of conditions and the following disclaimer. 15 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright 16 1.1 martin * notice, this list of conditions and the following disclaimer in the 17 1.1 martin * documentation and/or other materials provided with the distribution. 18 1.1 martin * 19 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 1.1 martin * POSSIBILITY OF SUCH DAMAGE. 
30 1.1 martin */ 31 1.1 martin 32 1.1 martin /* 33 1.1 martin * This driver supports the Synopsis Designware GMAC core, as found 34 1.1 martin * on Allwinner A20 cores and others. 35 1.1 martin * 36 1.1 martin * Real documentation seems to not be available, the marketing product 37 1.1 martin * documents could be found here: 38 1.1 martin * 39 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive 40 1.1 martin */ 41 1.1 martin 42 1.94 riastrad /* 43 1.94 riastrad * Lock order: 44 1.94 riastrad * 45 1.94 riastrad * IFNET_LOCK -> sc_mcast_lock 46 1.94 riastrad * IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx} 47 1.94 riastrad */ 48 1.94 riastrad 49 1.1 martin #include <sys/cdefs.h> 50 1.1 martin 51 1.96 jakllsch __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.96 2025/02/16 18:54:49 jakllsch Exp $"); 52 1.7 martin 53 1.7 martin /* #define DWC_GMAC_DEBUG 1 */ 54 1.1 martin 55 1.38 skrll #ifdef _KERNEL_OPT 56 1.1 martin #include "opt_inet.h" 57 1.38 skrll #endif 58 1.1 martin 59 1.1 martin #include <sys/param.h> 60 1.1 martin #include <sys/bus.h> 61 1.1 martin #include <sys/device.h> 62 1.1 martin #include <sys/intr.h> 63 1.1 martin #include <sys/systm.h> 64 1.1 martin #include <sys/sockio.h> 65 1.29 jmcneill #include <sys/cprng.h> 66 1.63 msaitoh #include <sys/rndsource.h> 67 1.1 martin 68 1.1 martin #include <net/if.h> 69 1.1 martin #include <net/if_ether.h> 70 1.1 martin #include <net/if_media.h> 71 1.1 martin #include <net/bpf.h> 72 1.1 martin #ifdef INET 73 1.1 martin #include <netinet/if_inarp.h> 74 1.1 martin #endif 75 1.1 martin 76 1.1 martin #include <dev/mii/miivar.h> 77 1.1 martin 78 1.1 martin #include <dev/ic/dwc_gmac_reg.h> 79 1.1 martin #include <dev/ic/dwc_gmac_var.h> 80 1.1 martin 81 1.56 msaitoh static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *); 82 1.56 msaitoh static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t); 83 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *); 84 
1.1 martin 85 1.61 msaitoh static int dwc_gmac_reset(struct dwc_gmac_softc *); 86 1.79 mrg static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]); 87 1.61 msaitoh static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *); 88 1.61 msaitoh static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *); 89 1.61 msaitoh static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *); 90 1.61 msaitoh static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *); 91 1.61 msaitoh static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *); 92 1.61 msaitoh static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *); 93 1.61 msaitoh static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *); 94 1.61 msaitoh static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *); 95 1.61 msaitoh static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int); 96 1.61 msaitoh static int dwc_gmac_init(struct ifnet *); 97 1.61 msaitoh static void dwc_gmac_stop(struct ifnet *, int); 98 1.61 msaitoh static void dwc_gmac_start(struct ifnet *); 99 1.61 msaitoh static void dwc_gmac_start_locked(struct ifnet *); 100 1.61 msaitoh static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *); 101 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *); 102 1.61 msaitoh static void dwc_gmac_tx_intr(struct dwc_gmac_softc *); 103 1.61 msaitoh static void dwc_gmac_rx_intr(struct dwc_gmac_softc *); 104 1.61 msaitoh static void dwc_gmac_setmulti(struct dwc_gmac_softc *); 105 1.22 martin static int dwc_gmac_ifflags_cb(struct ethercom *); 106 1.55 martin static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *); 107 1.55 martin static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *); 108 1.55 martin static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, 
int); 109 1.55 martin static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *); 110 1.55 martin static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *); 111 1.55 martin static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *); 112 1.55 martin static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *); 113 1.55 martin static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *); 114 1.55 martin static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *); 115 1.55 martin static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int); 116 1.55 martin static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *); 117 1.55 martin static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *); 118 1.55 martin static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *); 119 1.55 martin static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *); 120 1.55 martin static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *); 121 1.55 martin static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *); 122 1.55 martin 123 1.55 martin static const struct dwc_gmac_desc_methods desc_methods_standard = { 124 1.55 martin .tx_init_flags = dwc_gmac_desc_std_tx_init_flags, 125 1.55 martin .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev, 126 1.55 martin .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev, 127 1.55 martin .tx_set_len = dwc_gmac_desc_std_set_len, 128 1.55 martin .tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag, 129 1.55 martin .tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag, 130 1.55 martin .rx_init_flags = dwc_gmac_desc_std_rx_init_flags, 131 1.55 martin .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev, 132 1.55 martin .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev, 133 1.55 martin .rx_set_len = dwc_gmac_desc_std_set_len, 134 1.55 martin 
.rx_get_len = dwc_gmac_desc_std_get_len, 135 1.55 martin .rx_has_error = dwc_gmac_desc_std_rx_has_error 136 1.55 martin }; 137 1.55 martin 138 1.55 martin static const struct dwc_gmac_desc_methods desc_methods_enhanced = { 139 1.55 martin .tx_init_flags = dwc_gmac_desc_enh_tx_init_flags, 140 1.55 martin .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev, 141 1.55 martin .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev, 142 1.55 martin .tx_set_len = dwc_gmac_desc_enh_set_len, 143 1.55 martin .tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag, 144 1.55 martin .tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag, 145 1.55 martin .rx_init_flags = dwc_gmac_desc_enh_rx_init_flags, 146 1.55 martin .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev, 147 1.55 martin .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev, 148 1.55 martin .rx_set_len = dwc_gmac_desc_enh_set_len, 149 1.55 martin .rx_get_len = dwc_gmac_desc_enh_get_len, 150 1.55 martin .rx_has_error = dwc_gmac_desc_enh_rx_has_error 151 1.55 martin }; 152 1.55 martin 153 1.1 martin 154 1.82 skrll #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT + (N)) \ 155 1.82 skrll * sizeof(struct dwc_gmac_dev_dmadesc)) 156 1.82 skrll #define TX_NEXT(N) (((N) + 1) & (AWGE_TX_RING_COUNT - 1)) 157 1.1 martin 158 1.82 skrll #define RX_DESC_OFFSET(N) ((N) * sizeof(struct dwc_gmac_dev_dmadesc)) 159 1.82 skrll #define RX_NEXT(N) (((N) + 1) & (AWGE_RX_RING_COUNT - 1)) 160 1.8 martin 161 1.8 martin 162 1.8 martin 163 1.61 msaitoh #define GMAC_DEF_DMA_INT_MASK (GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \ 164 1.61 msaitoh GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \ 165 1.61 msaitoh GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE) 166 1.61 msaitoh 167 1.61 msaitoh #define GMAC_DMA_INT_ERRORS (GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \ 168 1.61 msaitoh GMAC_DMA_INT_FBE | \ 169 1.61 msaitoh GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \ 170 1.61 msaitoh GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \ 171 1.10 martin GMAC_DMA_INT_TJE) 172 1.8 martin 173 1.8 martin 
#define AWIN_DEF_MAC_INTRMASK \ 174 1.8 martin (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \ 175 1.55 martin AWIN_GMAC_MAC_INT_LINKCHG) 176 1.1 martin 177 1.7 martin #ifdef DWC_GMAC_DEBUG 178 1.61 msaitoh static void dwc_gmac_dump_dma(struct dwc_gmac_softc *); 179 1.61 msaitoh static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *); 180 1.61 msaitoh static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *); 181 1.61 msaitoh static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *); 182 1.61 msaitoh static void dwc_dump_status(struct dwc_gmac_softc *); 183 1.61 msaitoh static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t); 184 1.7 martin #endif 185 1.7 martin 186 1.51 jmcneill int 187 1.57 martin dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk) 188 1.1 martin { 189 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]; 190 1.91 skrll uint32_t maclo, machi, hwft; 191 1.1 martin struct mii_data * const mii = &sc->sc_mii; 192 1.1 martin struct ifnet * const ifp = &sc->sc_ec.ec_if; 193 1.5 martin prop_dictionary_t dict; 194 1.1 martin 195 1.1 martin mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET); 196 1.3 martin sc->sc_mii_clk = mii_clk & 7; 197 1.1 martin 198 1.5 martin dict = device_properties(sc->sc_dev); 199 1.5 martin prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL; 200 1.5 martin if (ea != NULL) { 201 1.5 martin /* 202 1.75 andvar * If the MAC address is overridden by a device property, 203 1.5 martin * use that. 204 1.5 martin */ 205 1.5 martin KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 206 1.5 martin KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 207 1.71 jmcneill memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN); 208 1.5 martin } else { 209 1.5 martin /* 210 1.95 andvar * If we did not get an externally configure address, 211 1.5 martin * try to read one from the current filter setup, 212 1.5 martin * before resetting the chip. 
213 1.5 martin */ 214 1.8 martin maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 215 1.8 martin AWIN_GMAC_MAC_ADDR0LO); 216 1.8 martin machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 217 1.8 martin AWIN_GMAC_MAC_ADDR0HI); 218 1.14 jmcneill 219 1.14 jmcneill if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) { 220 1.29 jmcneill /* fake MAC address */ 221 1.29 jmcneill maclo = 0x00f2 | (cprng_strong32() << 16); 222 1.29 jmcneill machi = cprng_strong32(); 223 1.14 jmcneill } 224 1.14 jmcneill 225 1.1 martin enaddr[0] = maclo & 0x0ff; 226 1.1 martin enaddr[1] = (maclo >> 8) & 0x0ff; 227 1.1 martin enaddr[2] = (maclo >> 16) & 0x0ff; 228 1.1 martin enaddr[3] = (maclo >> 24) & 0x0ff; 229 1.1 martin enaddr[4] = machi & 0x0ff; 230 1.1 martin enaddr[5] = (machi >> 8) & 0x0ff; 231 1.1 martin } 232 1.1 martin 233 1.91 skrll const uint32_t ver = 234 1.91 skrll bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION); 235 1.91 skrll const uint32_t snpsver = 236 1.91 skrll __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK); 237 1.91 skrll aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver); 238 1.55 martin 239 1.1 martin /* 240 1.21 joerg * Init chip and do initial setup 241 1.1 martin */ 242 1.1 martin if (dwc_gmac_reset(sc) != 0) 243 1.51 jmcneill return ENXIO; /* not much to cleanup, haven't attached yet */ 244 1.5 martin dwc_gmac_write_hwaddr(sc, enaddr); 245 1.52 sevan aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 246 1.1 martin ether_sprintf(enaddr)); 247 1.1 martin 248 1.55 martin hwft = 0; 249 1.91 skrll if (snpsver >= 0x35) { 250 1.55 martin hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 251 1.55 martin AWIN_GMAC_DMA_HWFEATURES); 252 1.55 martin aprint_normal_dev(sc->sc_dev, 253 1.55 martin "HW feature mask: %x\n", hwft); 254 1.55 martin } 255 1.83 skrll 256 1.83 skrll if (sizeof(bus_addr_t) > 4) { 257 1.83 skrll int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32), 258 1.83 skrll &sc->sc_dmat, BUS_DMA_WAITOK); 259 1.83 skrll if 
(error != 0) { 260 1.83 skrll aprint_error_dev(sc->sc_dev, 261 1.83 skrll "failed to create DMA subregion\n"); 262 1.83 skrll return ENOMEM; 263 1.83 skrll } 264 1.83 skrll } 265 1.83 skrll 266 1.55 martin if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) { 267 1.55 martin aprint_normal_dev(sc->sc_dev, 268 1.55 martin "Using enhanced descriptor format\n"); 269 1.55 martin sc->sc_descm = &desc_methods_enhanced; 270 1.55 martin } else { 271 1.55 martin sc->sc_descm = &desc_methods_standard; 272 1.55 martin } 273 1.70 chs if (hwft & GMAC_DMA_FEAT_RMON) { 274 1.70 chs uint32_t val; 275 1.70 chs 276 1.70 chs /* Mask all MMC interrupts */ 277 1.70 chs val = 0xffffffff; 278 1.70 chs bus_space_write_4(sc->sc_bst, sc->sc_bsh, 279 1.70 chs GMAC_MMC_RX_INT_MSK, val); 280 1.70 chs bus_space_write_4(sc->sc_bst, sc->sc_bsh, 281 1.70 chs GMAC_MMC_TX_INT_MSK, val); 282 1.70 chs } 283 1.55 martin 284 1.1 martin /* 285 1.1 martin * Allocate Tx and Rx rings 286 1.1 martin */ 287 1.1 martin if (dwc_gmac_alloc_dma_rings(sc) != 0) { 288 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n"); 289 1.1 martin goto fail; 290 1.1 martin } 291 1.38 skrll 292 1.1 martin if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) { 293 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n"); 294 1.1 martin goto fail; 295 1.1 martin } 296 1.1 martin 297 1.1 martin if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) { 298 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n"); 299 1.1 martin goto fail; 300 1.1 martin } 301 1.1 martin 302 1.93 skrll sc->sc_stopping = false; 303 1.93 skrll sc->sc_txbusy = false; 304 1.93 skrll 305 1.94 riastrad sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 306 1.93 skrll sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 307 1.38 skrll mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET); 308 1.38 skrll mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET); 309 1.38 skrll 310 1.1 martin /* 311 1.1 martin * 
Prepare interface data 312 1.1 martin */ 313 1.1 martin ifp->if_softc = sc; 314 1.1 martin strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 315 1.1 martin ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 316 1.43 ozaki ifp->if_extflags = IFEF_MPSAFE; 317 1.1 martin ifp->if_ioctl = dwc_gmac_ioctl; 318 1.1 martin ifp->if_start = dwc_gmac_start; 319 1.1 martin ifp->if_init = dwc_gmac_init; 320 1.1 martin ifp->if_stop = dwc_gmac_stop; 321 1.1 martin IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 322 1.1 martin IFQ_SET_READY(&ifp->if_snd); 323 1.1 martin 324 1.1 martin /* 325 1.1 martin * Attach MII subdevices 326 1.1 martin */ 327 1.2 martin sc->sc_ec.ec_mii = &sc->sc_mii; 328 1.1 martin ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 329 1.62 msaitoh mii->mii_ifp = ifp; 330 1.62 msaitoh mii->mii_readreg = dwc_gmac_miibus_read_reg; 331 1.62 msaitoh mii->mii_writereg = dwc_gmac_miibus_write_reg; 332 1.62 msaitoh mii->mii_statchg = dwc_gmac_miibus_statchg; 333 1.62 msaitoh mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY, 334 1.25 jmcneill MIIF_DOPAUSE); 335 1.1 martin 336 1.62 msaitoh if (LIST_EMPTY(&mii->mii_phys)) { 337 1.62 msaitoh aprint_error_dev(sc->sc_dev, "no PHY found!\n"); 338 1.62 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 339 1.62 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 340 1.62 msaitoh } else { 341 1.62 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 342 1.62 msaitoh } 343 1.1 martin 344 1.1 martin /* 345 1.33 tnn * We can support 802.1Q VLAN-sized frames. 346 1.33 tnn */ 347 1.33 tnn sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; 348 1.33 tnn 349 1.33 tnn /* 350 1.1 martin * Ready, attach interface 351 1.1 martin */ 352 1.38 skrll /* Attach the interface. 
*/ 353 1.74 riastrad if_initialize(ifp); 354 1.38 skrll sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if); 355 1.40 ozaki if_deferred_start_init(ifp, NULL); 356 1.1 martin ether_ifattach(ifp, enaddr); 357 1.22 martin ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb); 358 1.38 skrll if_register(ifp); 359 1.63 msaitoh rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 360 1.63 msaitoh RND_TYPE_NET, RND_FLAG_DEFAULT); 361 1.1 martin 362 1.1 martin /* 363 1.1 martin * Enable interrupts 364 1.1 martin */ 365 1.93 skrll mutex_enter(sc->sc_intr_lock); 366 1.25 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK, 367 1.8 martin AWIN_DEF_MAC_INTRMASK); 368 1.8 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, 369 1.8 martin GMAC_DEF_DMA_INT_MASK); 370 1.93 skrll mutex_exit(sc->sc_intr_lock); 371 1.1 martin 372 1.51 jmcneill return 0; 373 1.51 jmcneill 374 1.1 martin fail: 375 1.1 martin dwc_gmac_free_rx_ring(sc, &sc->sc_rxq); 376 1.1 martin dwc_gmac_free_tx_ring(sc, &sc->sc_txq); 377 1.41 msaitoh dwc_gmac_free_dma_rings(sc); 378 1.41 msaitoh mutex_destroy(&sc->sc_mdio_lock); 379 1.51 jmcneill 380 1.51 jmcneill return ENXIO; 381 1.1 martin } 382 1.1 martin 383 1.1 martin 384 1.1 martin 385 1.1 martin static int 386 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc) 387 1.1 martin { 388 1.1 martin size_t cnt; 389 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE, 390 1.61 msaitoh bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) 391 1.61 msaitoh | GMAC_BUSMODE_RESET); 392 1.72 ryo for (cnt = 0; cnt < 30000; cnt++) { 393 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) 394 1.1 martin & GMAC_BUSMODE_RESET) == 0) 395 1.1 martin return 0; 396 1.1 martin delay(10); 397 1.1 martin } 398 1.1 martin 399 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n"); 400 1.1 martin return EIO; 401 1.1 martin } 402 1.1 martin 403 1.1 martin static void 404 1.1 
martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc, 405 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]) 406 1.1 martin { 407 1.49 jmcneill uint32_t hi, lo; 408 1.1 martin 409 1.49 jmcneill hi = enaddr[4] | (enaddr[5] << 8); 410 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) 411 1.73 msaitoh | ((uint32_t)enaddr[3] << 24); 412 1.49 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi); 413 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo); 414 1.1 martin } 415 1.1 martin 416 1.1 martin static int 417 1.56 msaitoh dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val) 418 1.1 martin { 419 1.1 martin struct dwc_gmac_softc * const sc = device_private(self); 420 1.6 martin uint16_t mii; 421 1.1 martin size_t cnt; 422 1.1 martin 423 1.61 msaitoh mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK) 424 1.61 msaitoh | __SHIFTIN(reg, GMAC_MII_REG_MASK) 425 1.61 msaitoh | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK) 426 1.6 martin | GMAC_MII_BUSY; 427 1.1 martin 428 1.1 martin mutex_enter(&sc->sc_mdio_lock); 429 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii); 430 1.1 martin 431 1.1 martin for (cnt = 0; cnt < 1000; cnt++) { 432 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, 433 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) { 434 1.56 msaitoh *val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 435 1.3 martin AWIN_GMAC_MAC_MIIDATA); 436 1.1 martin break; 437 1.1 martin } 438 1.1 martin delay(10); 439 1.1 martin } 440 1.1 martin 441 1.1 martin mutex_exit(&sc->sc_mdio_lock); 442 1.1 martin 443 1.56 msaitoh if (cnt >= 1000) 444 1.56 msaitoh return ETIMEDOUT; 445 1.61 msaitoh 446 1.56 msaitoh return 0; 447 1.1 martin } 448 1.1 martin 449 1.56 msaitoh static int 450 1.56 msaitoh dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val) 451 1.1 martin { 452 1.1 martin struct dwc_gmac_softc * const sc = device_private(self); 453 1.6 martin uint16_t 
mii; 454 1.1 martin size_t cnt; 455 1.1 martin 456 1.61 msaitoh mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK) 457 1.61 msaitoh | __SHIFTIN(reg, GMAC_MII_REG_MASK) 458 1.61 msaitoh | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK) 459 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE; 460 1.1 martin 461 1.1 martin mutex_enter(&sc->sc_mdio_lock); 462 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val); 463 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii); 464 1.1 martin 465 1.1 martin for (cnt = 0; cnt < 1000; cnt++) { 466 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, 467 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) 468 1.1 martin break; 469 1.1 martin delay(10); 470 1.1 martin } 471 1.38 skrll 472 1.1 martin mutex_exit(&sc->sc_mdio_lock); 473 1.56 msaitoh 474 1.56 msaitoh if (cnt >= 1000) 475 1.56 msaitoh return ETIMEDOUT; 476 1.56 msaitoh 477 1.56 msaitoh return 0; 478 1.1 martin } 479 1.1 martin 480 1.1 martin static int 481 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, 482 1.1 martin struct dwc_gmac_rx_ring *ring) 483 1.1 martin { 484 1.1 martin struct dwc_gmac_rx_data *data; 485 1.1 martin bus_addr_t physaddr; 486 1.89 skrll const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc); 487 1.1 martin int error, i, next; 488 1.1 martin 489 1.1 martin ring->r_cur = ring->r_next = 0; 490 1.89 skrll memset(ring->r_desc, 0, rxringsz); 491 1.1 martin 492 1.1 martin /* 493 1.1 martin * Pre-allocate Rx buffers and populate Rx ring. 
494 1.1 martin */ 495 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) { 496 1.1 martin struct dwc_gmac_dev_dmadesc *desc; 497 1.1 martin 498 1.1 martin data = &sc->sc_rxq.r_data[i]; 499 1.1 martin 500 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA); 501 1.1 martin if (data->rd_m == NULL) { 502 1.1 martin aprint_error_dev(sc->sc_dev, 503 1.1 martin "could not allocate rx mbuf #%d\n", i); 504 1.1 martin error = ENOMEM; 505 1.1 martin goto fail; 506 1.1 martin } 507 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 508 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map); 509 1.1 martin if (error != 0) { 510 1.1 martin aprint_error_dev(sc->sc_dev, 511 1.1 martin "could not create DMA map\n"); 512 1.1 martin data->rd_map = NULL; 513 1.1 martin goto fail; 514 1.1 martin } 515 1.1 martin MCLGET(data->rd_m, M_DONTWAIT); 516 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) { 517 1.1 martin aprint_error_dev(sc->sc_dev, 518 1.1 martin "could not allocate mbuf cluster #%d\n", i); 519 1.1 martin error = ENOMEM; 520 1.1 martin goto fail; 521 1.1 martin } 522 1.66 tnn data->rd_m->m_len = data->rd_m->m_pkthdr.len 523 1.66 tnn = data->rd_m->m_ext.ext_size; 524 1.96 jakllsch m_adj(data->rd_m, ETHER_ALIGN); 525 1.66 tnn if (data->rd_m->m_len > AWGE_MAX_PACKET) { 526 1.66 tnn data->rd_m->m_len = data->rd_m->m_pkthdr.len 527 1.66 tnn = AWGE_MAX_PACKET; 528 1.66 tnn } 529 1.1 martin 530 1.66 tnn error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map, 531 1.66 tnn data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT); 532 1.1 martin if (error != 0) { 533 1.1 martin aprint_error_dev(sc->sc_dev, 534 1.1 martin "could not load rx buf DMA map #%d", i); 535 1.1 martin goto fail; 536 1.1 martin } 537 1.66 tnn bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0, 538 1.66 tnn data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD); 539 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr; 540 1.1 martin 541 1.1 martin desc = &sc->sc_rxq.r_desc[i]; 542 1.1 martin desc->ddesc_data = 
htole32(physaddr); 543 1.8 martin next = RX_NEXT(i); 544 1.38 skrll desc->ddesc_next = htole32(ring->r_physaddr 545 1.1 martin + next * sizeof(*desc)); 546 1.55 martin sc->sc_descm->rx_init_flags(desc); 547 1.66 tnn sc->sc_descm->rx_set_len(desc, data->rd_m->m_len); 548 1.55 martin sc->sc_descm->rx_set_owned_by_dev(desc); 549 1.1 martin } 550 1.1 martin 551 1.89 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 552 1.89 skrll RX_DESC_OFFSET(0), 553 1.82 skrll AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc), 554 1.85 skrll BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 555 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, 556 1.6 martin ring->r_physaddr); 557 1.1 martin 558 1.1 martin return 0; 559 1.1 martin 560 1.1 martin fail: 561 1.1 martin dwc_gmac_free_rx_ring(sc, ring); 562 1.1 martin return error; 563 1.1 martin } 564 1.1 martin 565 1.1 martin static void 566 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, 567 1.1 martin struct dwc_gmac_rx_ring *ring) 568 1.1 martin { 569 1.1 martin struct dwc_gmac_dev_dmadesc *desc; 570 1.66 tnn struct dwc_gmac_rx_data *data; 571 1.1 martin int i; 572 1.1 martin 573 1.38 skrll mutex_enter(&ring->r_mtx); 574 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) { 575 1.1 martin desc = &sc->sc_rxq.r_desc[i]; 576 1.66 tnn data = &sc->sc_rxq.r_data[i]; 577 1.55 martin sc->sc_descm->rx_init_flags(desc); 578 1.66 tnn sc->sc_descm->rx_set_len(desc, data->rd_m->m_len); 579 1.55 martin sc->sc_descm->rx_set_owned_by_dev(desc); 580 1.1 martin } 581 1.1 martin 582 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0, 583 1.82 skrll AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc), 584 1.61 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 585 1.1 martin 586 1.1 martin ring->r_cur = ring->r_next = 0; 587 1.11 martin /* reset DMA address to start of ring */ 588 1.11 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, 589 1.11 martin 
sc->sc_rxq.r_physaddr); 590 1.38 skrll mutex_exit(&ring->r_mtx); 591 1.1 martin } 592 1.1 martin 593 1.1 martin static int 594 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc) 595 1.1 martin { 596 1.89 skrll const size_t ringsize = AWGE_TOTAL_RING_COUNT * 597 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc); 598 1.1 martin int error, nsegs; 599 1.1 martin void *rings; 600 1.1 martin 601 1.89 skrll error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0, 602 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map); 603 1.1 martin if (error != 0) { 604 1.1 martin aprint_error_dev(sc->sc_dev, 605 1.1 martin "could not create desc DMA map\n"); 606 1.1 martin sc->sc_dma_ring_map = NULL; 607 1.1 martin goto fail; 608 1.1 martin } 609 1.1 martin 610 1.89 skrll error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0, 611 1.61 msaitoh &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT |BUS_DMA_COHERENT); 612 1.1 martin if (error != 0) { 613 1.1 martin aprint_error_dev(sc->sc_dev, 614 1.1 martin "could not map DMA memory\n"); 615 1.1 martin goto fail; 616 1.1 martin } 617 1.1 martin 618 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs, 619 1.89 skrll ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 620 1.1 martin if (error != 0) { 621 1.1 martin aprint_error_dev(sc->sc_dev, 622 1.1 martin "could not allocate DMA memory\n"); 623 1.1 martin goto fail; 624 1.1 martin } 625 1.1 martin 626 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings, 627 1.89 skrll ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); 628 1.1 martin if (error != 0) { 629 1.1 martin aprint_error_dev(sc->sc_dev, 630 1.1 martin "could not load desc DMA map\n"); 631 1.1 martin goto fail; 632 1.1 martin } 633 1.1 martin 634 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */ 635 1.1 martin sc->sc_rxq.r_desc = rings; 636 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr; 637 1.1 martin 638 1.1 martin /* and next 
rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

/*
 * Tear down the shared RX/TX descriptor area: sync, unload, unmap
 * and free the single DMA allocation that backs both rings
 * (sc_dma_ring_map / sc_dma_ring_seg).
 */
static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

/*
 * Release the per-slot RX resources (DMA maps and mbufs).
 * Safe on a partially initialized ring: NULL maps are skipped and
 * m_freem() tolerates a NULL mbuf.
 */
static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/*
			 * NOTE(review): the sync length here is the size of
			 * the descriptor array, not this buffer map's size --
			 * looks like it should be data->rd_map->dm_mapsize;
			 * confirm before changing.
			 */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    AWGE_RX_RING_COUNT
				* sizeof(struct dwc_gmac_dev_dmadesc),
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

/*
 * Initialize the TX ring: zero the descriptors, create one DMA map
 * per slot and pre-link every descriptor to its successor (the
 * hardware is used in chained descriptor mode).
 * Returns 0 or an errno; on failure the ring is torn down again.
 */
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		/* chain descriptor i to descriptor i+1 (mod ring size) */
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

/*
 * Sync TX descriptors [start, end) with 'ops', splitting into two
 * bus_dmamap_sync() calls when the range wraps around the ring end.
 */
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

/*
 * Drop all pending TX packets, reset the ring indices and re-point
 * the DMA engine at the ring base.  Takes and releases t_mtx.
 */
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

/*
 * Final TX ring teardown: unload any still-loaded maps, free the
 * mbufs, then destroy the per-slot DMA maps.
 */
static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/*
			 * NOTE(review): syncs td_active but takes the length
			 * from td_map->dm_mapsize -- presumably the two refer
			 * to the same map here; verify.
			 */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

/*
 * MII status-change callback: program the MAC configuration
 * (MII/GMII selection, 100Mbit bit, duplex) and flow control to
 * match the media the PHY negotiated.
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	/* optional platform hook for per-speed clock setup */
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

/*
 * ifnet if_init: stop the chip, (re)program DMA bus mode, frame and
 * multicast filters and the ring base addresses, then start the
 * RX/TX DMA engines and mark the interface running.
 * Called with IFNET_LOCK held; sleepable.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	uint32_t ffilt;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(ifp == &sc->sc_ec.ec_if);

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	mutex_enter(sc->sc_mcast_lock);
	dwc_gmac_setmulti(sc);
	mutex_exit(sc->sc_mcast_lock);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting DMA opmode register: %08x\n", opmode);
#endif

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = false;
	mutex_exit(sc->sc_intr_lock);

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);

	return 0;
}

/*
 * ifnet if_start: hand off to dwc_gmac_start_locked() unless the
 * interface is being stopped.
 */
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	KASSERT(if_is_mpsafe(ifp));

	mutex_enter(sc->sc_intr_lock);
	if (!sc->sc_stopping) {
		dwc_gmac_start_locked(ifp);
	}
	mutex_exit(sc->sc_intr_lock);
}

/*
 * Move packets from the interface send queue onto the TX ring and,
 * if anything was queued, sync the new descriptors and kick the DMA
 * engine with a TX poll demand.  Called with sc_intr_lock held.
 */
static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	KASSERT(mutex_owned(sc->sc_intr_lock));

	mutex_enter(&sc->sc_txq.t_mtx);
	if (sc->sc_txbusy) {
		mutex_exit(&sc->sc_txq.t_mtx);
		return;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			/* ring full: leave m0 on the queue for later */
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

/*
 * ifnet if_stop: mark the interface down, halt the RX/TX DMA
 * engines, flush the TX FIFO and reset both rings.
 * Called with IFNET_LOCK held; sleepable.
 */
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));

	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(sc->sc_mcast_lock);
	sc->sc_if_flags = ifp->if_flags;
	mutex_exit(sc->sc_mcast_lock);

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = true;
	mutex_exit(sc->sc_intr_lock);

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 *
 * Loads the mbuf chain into the DMA map of the first free slot and
 * fills one descriptor per DMA segment.  Ownership of the first
 * descriptor is handed to the device only after all others are set
 * up and synced, so the engine never sees a half-built chain.
 * Returns 0, ENOBUFS when the ring is full, or a bus_dma errno.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	/* bookkeeping lives in the slot of the last segment */
	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}
/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode. Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int ret = 0;

	KASSERT(IFNET_LOCKED(ifp));
	mutex_enter(sc->sc_mcast_lock);

	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		/* a flag we cannot handle here changed: request full reset */
		ret = ENETRESET;
	} else if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}

	mutex_exit(sc->sc_mcast_lock);

	return ret;
}

/*
 * ifnet if_ioctl: defer to ether_ioctl() and, when the multicast
 * list changed while the interface is running, re-program the
 * hardware filter instead of doing a full reset.
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int error = 0;

	/* SIOC(ADD|DEL)MULTI may arrive without IFNET_LOCK */
	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	const int s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	splx(s);

	if (error == ENETRESET) {
		error = 0;
		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			mutex_enter(sc->sc_mcast_lock);
			if (sc->sc_if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed; set the hardware
				 * filter accordingly.
				 */
				dwc_gmac_setmulti(sc);
			}
			mutex_exit(sc->sc_mcast_lock);
		}
	}

	return error;
}

/*
 * TX completion: walk descriptors the device has handed back,
 * unload their DMA maps, free the mbufs and account the packets.
 * Clears sc_txbusy when ring space became available again.
 */
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 does not need to be a valid descriptor,
		 * this is just a special notion to just sync
		 * a single tx descriptor (i)
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		/* only the last segment's slot carries the mbuf */
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
		    __func__, i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

/*
 * RX completion: for each descriptor returned by the device, swap
 * in a freshly allocated mbuf cluster, pass the received packet up
 * the stack and hand the descriptor back to the hardware.  On any
 * allocation or map-load failure the packet is dropped and the old
 * mbuf is recycled into the ring.
 */
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
		    __func__, i);
#endif
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "%s: RX error: status %08x, skipping\n",
			    __func__, le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: device is done with descriptor #%d, len: %d\n",
		    __func__, i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		m_adj(mnew, ETHER_ALIGN);
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* the ring slot would be unusable: fatal */
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: receiving packet at desc #%d, using mbuf %p\n",
		    __func__, i, data->rd_m);
#endif
		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* return the descriptor to the device last */
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Program the MAC frame filter and the 64-bit multicast hash table
 * from the ethercom multicast list.  Falls back to an all-ones hash
 * for PROMISC, or when a multicast address range forces ALLMULTI.
 * Called with sc_mcast_lock held.
 */
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_mcast_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (sc->sc_if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/* address ranges cannot be hashed: go ALLMULTI */
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		/* top 6 bits of the inverted CRC select the hash bit */
		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
}

/*
 * Main interrupt handler: dispatch MII, RX and TX events, ack the
 * DMA status bits, feed the random pool and schedule a deferred
 * if_start when progress was made.
 * Returns non-zero when the interrupt was ours.
 */
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	mutex_enter(sc->sc_intr_lock);
	if (sc->sc_stopping) {
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		/* reading MII status clears the interrupt condition */
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	mutex_exit(sc->sc_intr_lock);

	return rv;
}

/* Hand the descriptor to the device (shared by both formats). */
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

/* Non-zero while the device still owns the descriptor. */
static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

/* Standard descriptor format: set buffer 1 length. */
static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

/* Standard format: frame length reported in the status word. */
static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

/* Standard format: reset TX descriptor flags, chained addressing. */
static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

/* Standard format: mark descriptor as first segment of a frame. */
static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

/* Standard format: mark last segment and request a TX interrupt. */
static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

/*
 * Standard format: reset RX descriptor flags, chained addressing.
 * NOTE(review): uses DDESC_CNTL_TXCHAIN for an RX descriptor --
 * presumably the chain bit occupies the same position in both
 * directions for this format; confirm against the register header.
 */
static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

/* Standard format: non-zero if the RX status flags an error. */
static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc) {
	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

/* Enhanced descriptor format: set buffer 1 length. */
static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

/* Enhanced format: frame length from the RDES0 status word. */
static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

/* Enhanced format: reset TX descriptor, chained addressing (TCH). */
static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

/* Enhanced format: mark first segment (FS). */
static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

/* Enhanced format: mark last segment (LS), request interrupt (IC). */
static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

/* Enhanced format: reset RX descriptor, chained addressing (RCH). */
static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

/* Enhanced format: non-zero if RDES0 flags an error (ES or LE). */
static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
/* Dump the DMA engine register state (debug builds only). */
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR)); 1665 1.7 martin } 1666 1.7 martin 1667 1.7 martin static void 1668 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc) 1669 1.7 martin { 1670 1.89 skrll const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc); 1671 1.7 martin 1672 1.8 martin aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n", 1673 1.8 martin sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued); 1674 1.8 martin aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n"); 1675 1.89 skrll 1676 1.89 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 1677 1.89 skrll TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz, 1678 1.89 skrll BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1679 1.89 skrll 1680 1.90 skrll for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) { 1681 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i]; 1682 1.90 skrll aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x " 1683 1.15 martin "data: %08x next: %08x\n", 1684 1.90 skrll i, sc->sc_txq.t_physaddr + i * descsz, 1685 1.55 martin le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1), 1686 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next)); 1687 1.7 martin } 1688 1.7 martin } 1689 1.8 martin 1690 1.8 martin static void 1691 1.11 martin dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc) 1692 1.11 martin { 1693 1.89 skrll const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc); 1694 1.11 martin 1695 1.11 martin aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n", 1696 1.11 martin sc->sc_rxq.r_cur, sc->sc_rxq.r_next); 1697 1.11 martin aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n"); 1698 1.89 skrll 1699 1.89 skrll bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 1700 1.89 skrll RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz, 1701 1.89 skrll BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1702 1.89 skrll 1703 1.90 skrll for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) { 1704 1.11 martin 
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		/* Descriptor handed back to us: decode its RDES0 status bits. */
		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e" "daff\0"
			    "f\x10\xe" "frlen\0"
			    "b\x0f" "error\0"
			    "b\x0e" "rxtrunc\0"	/* descriptor error? */
			    "b\x0d" "saff\0"
			    "b\x0c" "giantframe\0"	/* length error? */
			    "b\x0b" "damaged\0"
			    "b\x0a" "vlan\0"
			    "b\x09" "first\0"
			    "b\x08" "last\0"
			    "b\x07" "giant\0"
			    "b\x06" "collison\0"
			    "b\x05" "ether\0"
			    "b\x04" "watchdog\0"
			    "b\x03" "miierror\0"
			    "b\x02" "dribbling\0"
			    "b\x01" "crc\0"
			    "\0", le32toh(desc->ddesc_status0));
		}

		/* Decoded status is only printed for descriptors we own. */
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x %s\n",
		    i, sc->sc_rxq.r_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
		    sc->sc_descm->rx_is_owned_by_dev(desc) ?
"" : buf); 1737 1.11 martin } 1738 1.11 martin } 1739 1.11 martin 1740 1.11 martin static void 1741 1.10 martin dwc_dump_status(struct dwc_gmac_softc *sc) 1742 1.8 martin { 1743 1.8 martin uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 1744 1.87 skrll AWIN_GMAC_MAC_INTR); 1745 1.8 martin uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 1746 1.87 skrll AWIN_GMAC_DMA_STATUS); 1747 1.8 martin char buf[200]; 1748 1.8 martin 1749 1.8 martin /* print interrupt state */ 1750 1.90 skrll snprintb(buf, sizeof(buf), 1751 1.90 skrll "\177\20" 1752 1.90 skrll "b\x1c" "GPI\0" 1753 1.90 skrll "b\x1b" "GMC\0" 1754 1.90 skrll "b\x1a" "GLI\0" 1755 1.90 skrll "f\x17\x3" "EB\0" 1756 1.90 skrll "f\x14\x3" "TPS\0" 1757 1.90 skrll "f\x11\x3" "RPS\0" 1758 1.90 skrll "b\x10" "NI\0" 1759 1.90 skrll "b\x0f" "AI\0" 1760 1.90 skrll "b\x0e" "ER\0" 1761 1.90 skrll "b\x0d" "FB\0" 1762 1.90 skrll "b\x0a" "ET\0" 1763 1.90 skrll "b\x09" "RW\0" 1764 1.90 skrll "b\x08" "RS\0" 1765 1.90 skrll "b\x07" "RU\0" 1766 1.90 skrll "b\x06" "RI\0" 1767 1.90 skrll "b\x05" "UN\0" 1768 1.90 skrll "b\x04" "OV\0" 1769 1.90 skrll "b\x03" "TJ\0" 1770 1.90 skrll "b\x02" "TU\0" 1771 1.90 skrll "b\x01" "TS\0" 1772 1.90 skrll "b\x00" "TI\0" 1773 1.8 martin "\0", dma_status); 1774 1.10 martin aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n", 1775 1.8 martin status, buf); 1776 1.10 martin } 1777 1.8 martin 1778 1.10 martin static void 1779 1.10 martin dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg) 1780 1.10 martin { 1781 1.10 martin dwc_dump_status(sc); 1782 1.22 martin dwc_gmac_dump_ffilt(sc, 1783 1.22 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT)); 1784 1.8 martin dwc_gmac_dump_dma(sc); 1785 1.8 martin dwc_gmac_dump_tx_desc(sc); 1786 1.11 martin dwc_gmac_dump_rx_desc(sc); 1787 1.8 martin 1788 1.21 joerg panic("%s", msg); 1789 1.8 martin } 1790 1.22 martin 1791 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t 
ffilt) 1792 1.22 martin { 1793 1.22 martin char buf[200]; 1794 1.22 martin 1795 1.22 martin /* print filter setup */ 1796 1.22 martin snprintb(buf, sizeof(buf), "\177\20" 1797 1.22 martin "b\x1f""RA\0" 1798 1.22 martin "b\x0a""HPF\0" 1799 1.22 martin "b\x09""SAF\0" 1800 1.22 martin "b\x08""SAIF\0" 1801 1.22 martin "b\x05""DBF\0" 1802 1.22 martin "b\x04""PM\0" 1803 1.22 martin "b\x03""DAIF\0" 1804 1.22 martin "b\x02""HMC\0" 1805 1.22 martin "b\x01""HUC\0" 1806 1.22 martin "b\x00""PR\0" 1807 1.22 martin "\0", ffilt); 1808 1.22 martin aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf); 1809 1.22 martin } 1810 1.7 martin #endif 1811