1 1.61 rin /* $NetBSD: if_gfe.c,v 1.61 2024/07/05 04:31:51 rin Exp $ */ 2 1.1 matt 3 1.1 matt /* 4 1.1 matt * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. 5 1.1 matt * All rights reserved. 6 1.1 matt * 7 1.1 matt * Redistribution and use in source and binary forms, with or without 8 1.1 matt * modification, are permitted provided that the following conditions 9 1.1 matt * are met: 10 1.1 matt * 1. Redistributions of source code must retain the above copyright 11 1.1 matt * notice, this list of conditions and the following disclaimer. 12 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright 13 1.1 matt * notice, this list of conditions and the following disclaimer in the 14 1.1 matt * documentation and/or other materials provided with the distribution. 15 1.1 matt * 3. All advertising materials mentioning features or use of this software 16 1.1 matt * must display the following acknowledgement: 17 1.1 matt * This product includes software developed for the NetBSD Project by 18 1.1 matt * Allegro Networks, Inc., and Wasabi Systems, Inc. 19 1.1 matt * 4. The name of Allegro Networks, Inc. may not be used to endorse 20 1.1 matt * or promote products derived from this software without specific prior 21 1.1 matt * written permission. 22 1.1 matt * 5. The name of Wasabi Systems, Inc. may not be used to endorse 23 1.1 matt * or promote products derived from this software without specific prior 24 1.1 matt * written permission. 25 1.1 matt * 26 1.1 matt * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND 27 1.1 matt * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, 28 1.1 matt * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY 29 1.1 matt * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 1.1 matt * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. 31 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 32 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 33 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 34 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 35 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 36 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 37 1.1 matt * POSSIBILITY OF SUCH DAMAGE. 38 1.1 matt */ 39 1.1 matt 40 1.1 matt /* 41 1.1 matt * if_gfe.c -- GT ethernet MAC driver 42 1.1 matt */ 43 1.12 lukem 44 1.12 lukem #include <sys/cdefs.h> 45 1.61 rin __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.61 2024/07/05 04:31:51 rin Exp $"); 46 1.1 matt 47 1.1 matt #include "opt_inet.h" 48 1.1 matt 49 1.1 matt #include <sys/param.h> 50 1.36 kiyohara #include <sys/bus.h> 51 1.1 matt #include <sys/callout.h> 52 1.1 matt #include <sys/device.h> 53 1.1 matt #include <sys/errno.h> 54 1.1 matt #include <sys/ioctl.h> 55 1.1 matt #include <sys/mbuf.h> 56 1.36 kiyohara #include <sys/mutex.h> 57 1.1 matt #include <sys/socket.h> 58 1.1 matt 59 1.1 matt #include <net/if.h> 60 1.1 matt #include <net/if_dl.h> 61 1.1 matt #include <net/if_ether.h> 62 1.1 matt #include <net/if_media.h> 63 1.1 matt 64 1.1 matt #ifdef INET 65 1.1 matt #include <netinet/in.h> 66 1.1 matt #include <netinet/if_inarp.h> 67 1.1 matt #endif 68 1.1 matt #include <net/bpf.h> 69 1.45 riastrad #include <sys/rndsource.h> 70 1.1 matt 71 1.36 kiyohara #include <dev/mii/mii.h> 72 1.1 matt #include <dev/mii/miivar.h> 73 1.1 matt 74 1.36 kiyohara #include <dev/marvell/gtreg.h> 75 1.36 kiyohara #include <dev/marvell/gtvar.h> 76 1.1 matt #include <dev/marvell/gtethreg.h> 77 1.36 kiyohara #include <dev/marvell/if_gfevar.h> 78 1.36 kiyohara #include <dev/marvell/marvellreg.h> 79 1.36 kiyohara #include <dev/marvell/marvellvar.h> 80 1.36 kiyohara 81 1.36 kiyohara #include <prop/proplib.h> 82 1.36 kiyohara 83 1.36 kiyohara #include "locators.h" 84 1.1 matt 85 1.1 matt 86 1.1 matt #define GE_READ(sc, reg) \ 87 1.36 kiyohara bus_space_read_4((sc)->sc_memt, (sc)->sc_memh, (reg)) 88 1.1 matt #define GE_WRITE(sc, reg, v) \ 89 1.36 kiyohara bus_space_write_4((sc)->sc_memt, (sc)->sc_memh, (reg), (v)) 90 1.1 matt 91 1.1 matt #define GE_DEBUG 92 1.1 matt #if 0 93 1.1 matt #define GE_NOHASH 94 1.1 matt #define GE_NORX 95 1.1 matt #endif 96 1.1 matt 97 1.1 matt #ifdef GE_DEBUG 98 1.36 kiyohara #define GE_DPRINTF(sc, a) \ 99 1.36 kiyohara do { \ 100 1.36 kiyohara if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \ 101 1.36 kiyohara printf a; \ 102 1.36 kiyohara } while (0 /* CONSTCOND */) 103 1.1 matt #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func)) 104 1.1 matt #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]")) 105 1.1 matt #else 106 1.1 matt #define GE_DPRINTF(sc, a) do { } while (0) 107 1.1 matt #define GE_FUNC_ENTER(sc, func) do { } while (0) 108 1.1 matt #define GE_FUNC_EXIT(sc, str) do { } while (0) 109 1.1 matt #endif 110 1.1 matt enum gfe_whack_op { 111 1.1 matt GE_WHACK_START, GE_WHACK_RESTART, 112 1.1 matt GE_WHACK_CHANGE, GE_WHACK_STOP 113 1.1 matt }; 114 1.1 matt 115 1.1 matt enum gfe_hash_op { 116 1.1 matt GE_HASH_ADD, GE_HASH_REMOVE, 117 1.1 matt }; 118 1.1 matt 119 1.2 matt #if 1 120 1.2 matt #define htogt32(a) htobe32(a) 121 1.2 matt #define gt32toh(a) be32toh(a) 122 1.2 matt #else 123 1.2 matt #define htogt32(a) htole32(a) 124 1.2 matt #define gt32toh(a) le32toh(a) 125 1.2 matt #endif 126 1.2 matt 127 1.6 matt #define GE_RXDSYNC(sc, rxq, n, ops) \ 128 1.6 matt bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \ 129 1.6 matt (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \ 130 1.6 matt (ops)) 131 1.6 matt #define GE_RXDPRESYNC(sc, rxq, n) \ 132 1.52 msaitoh GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE) 133 1.6 matt #define GE_RXDPOSTSYNC(sc, rxq, n) \ 134 1.52 msaitoh GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE) 135 1.6 matt 136 1.6 matt #define GE_TXDSYNC(sc, txq, n, ops) \ 137 1.6 matt bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \ 138 1.6 matt (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \ 139 1.6 matt (ops)) 140 1.6 matt #define GE_TXDPRESYNC(sc, txq, n) \ 141 1.52 msaitoh GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE) 142 1.6 matt #define GE_TXDPOSTSYNC(sc, txq, n) \ 143 1.52 msaitoh GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE) 144 1.6 matt 145 1.1 matt #define STATIC 146 1.1 matt 147 1.36 kiyohara 148 1.36 kiyohara STATIC int gfec_match(device_t, cfdata_t, void *); 149 1.36 kiyohara STATIC void gfec_attach(device_t, device_t, void *); 150 1.36 kiyohara 151 1.36 kiyohara STATIC int gfec_print(void *, const char *); 152 1.36 kiyohara STATIC int gfec_search(device_t, cfdata_t, const int *, void *); 153 1.36 kiyohara 154 1.36 kiyohara STATIC int gfec_enet_phy(device_t, int); 155 1.50 msaitoh STATIC int gfec_mii_read(device_t, int, int, uint16_t *); 156 1.50 msaitoh STATIC int gfec_mii_write(device_t, int, int, uint16_t); 157 1.41 matt STATIC void gfec_mii_statchg(struct ifnet *); 158 1.36 kiyohara 159 1.36 kiyohara STATIC int gfe_match(device_t, cfdata_t, void *); 160 1.36 kiyohara STATIC void gfe_attach(device_t, device_t, void *); 161 1.1 matt 162 1.2 matt STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, 163 1.2 matt size_t, int); 164 1.1 matt STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); 165 1.1 matt 166 1.36 kiyohara STATIC int gfe_ifioctl(struct ifnet *, u_long, void *); 167 1.36 kiyohara STATIC void gfe_ifstart(struct ifnet *); 168 1.36 kiyohara STATIC void gfe_ifwatchdog(struct ifnet *); 169 1.1 matt 170 1.1 matt STATIC void gfe_tick(void *arg); 171 1.1 matt 172 1.1 matt STATIC void gfe_tx_restart(void *); 173 1.1 matt STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); 174 1.1 matt STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); 175 1.1 matt STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); 176 1.15 matt STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio); 177 1.1 matt STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); 178 1.1 matt STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); 179 1.1 matt 180 1.1 matt STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); 181 1.1 matt STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); 182 1.1 matt STATIC int gfe_rx_prime(struct gfe_softc *); 183 1.1 matt STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); 184 1.1 matt STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); 185 1.15 matt STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio); 186 1.1 matt STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); 187 1.1 matt 188 1.1 matt STATIC int gfe_intr(void *); 189 1.1 matt 190 1.1 matt STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); 191 1.1 matt 192 1.6 matt STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); 193 1.1 matt STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, 194 1.6 matt enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); 195 1.1 matt STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, 196 1.1 matt u_long); 197 1.1 matt STATIC int gfe_hash_fill(struct gfe_softc *); 198 1.1 matt STATIC int gfe_hash_alloc(struct gfe_softc *); 199 1.1 matt 200 1.36 kiyohara 201 1.36 kiyohara CFATTACH_DECL_NEW(gfec, sizeof(struct gfec_softc), 202 1.36 kiyohara gfec_match, gfec_attach, NULL, NULL); 203 1.36 kiyohara CFATTACH_DECL_NEW(gfe, sizeof(struct gfe_softc), 204 1.1 matt gfe_match, gfe_attach, NULL, NULL); 205 1.1 matt 206 1.2 matt 207 1.36 kiyohara /* ARGSUSED */ 208 1.1 matt int 209 1.36 kiyohara gfec_match(device_t parent, cfdata_t cf, void *aux) 210 1.1 matt { 211 1.36 kiyohara struct marvell_attach_args *mva = aux; 212 1.1 matt 213 1.36 kiyohara if (strcmp(mva->mva_name, cf->cf_name) != 0) 214 1.1 matt return 0; 215 1.37 kiyohara if (mva->mva_offset == MVA_OFFSET_DEFAULT) 216 1.36 kiyohara return 0; 217 1.36 kiyohara 218 1.36 kiyohara mva->mva_size = ETHC_SIZE; 219 1.36 kiyohara return 1; 220 1.36 kiyohara } 221 1.36 kiyohara 222 1.36 kiyohara /* ARGSUSED */ 223 1.36 kiyohara void 224 1.36 kiyohara gfec_attach(device_t parent, device_t self, void *aux) 225 1.36 kiyohara { 226 1.36 kiyohara struct gfec_softc *sc = device_private(self); 227 1.36 kiyohara struct marvell_attach_args *mva = aux, gfea; 228 1.36 kiyohara static int gfe_irqs[] = { 32, 33, 34 }; 229 1.36 kiyohara int i; 230 1.36 kiyohara 231 1.36 kiyohara aprint_naive("\n"); 232 1.36 kiyohara aprint_normal(": Ethernet Controller\n"); 233 1.36 kiyohara 234 1.36 kiyohara sc->sc_dev = self; 235 1.36 kiyohara sc->sc_iot = mva->mva_iot; 236 1.36 kiyohara if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, mva->mva_offset, 237 1.36 kiyohara mva->mva_size, &sc->sc_ioh)) { 238 1.36 kiyohara aprint_error_dev(self, "Cannot map registers\n"); 239 1.36 kiyohara return; 240 1.36 kiyohara } 241 1.36 kiyohara 242 1.36 kiyohara mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); 243 1.36 kiyohara 244 1.36 kiyohara for (i = 0; i < ETH_NUM; i++) { 245 1.36 kiyohara gfea.mva_name = "gfe"; 246 1.36 kiyohara gfea.mva_model = mva->mva_model; 247 1.36 kiyohara gfea.mva_iot = sc->sc_iot; 248 1.36 kiyohara gfea.mva_ioh = sc->sc_ioh; 249 1.36 kiyohara gfea.mva_unit = i; 250 1.36 kiyohara gfea.mva_dmat = mva->mva_dmat; 251 1.36 kiyohara gfea.mva_irq = gfe_irqs[i]; 252 1.58 thorpej config_found(sc->sc_dev, &gfea, gfec_print, 253 1.59 thorpej CFARGS(.submatch = gfec_search)); 254 1.36 kiyohara } 255 1.36 kiyohara } 256 1.36 kiyohara 257 1.36 kiyohara int 258 1.36 kiyohara gfec_print(void *aux, const char *pnp) 259 1.36 kiyohara { 260 1.36 kiyohara struct marvell_attach_args *gfea = aux; 261 1.36 kiyohara 262 1.36 kiyohara if (pnp) 263 1.36 kiyohara aprint_normal("%s at %s port %d", 264 1.36 kiyohara gfea->mva_name, pnp, gfea->mva_unit); 265 1.36 kiyohara else { 266 1.36 kiyohara if (gfea->mva_unit != GFECCF_PORT_DEFAULT) 267 1.36 kiyohara aprint_normal(" port %d", gfea->mva_unit); 268 1.36 kiyohara if (gfea->mva_irq != GFECCF_IRQ_DEFAULT) 269 1.36 kiyohara aprint_normal(" irq %d", gfea->mva_irq); 270 1.36 kiyohara } 271 1.36 kiyohara return UNCONF; 272 1.36 kiyohara } 273 1.36 kiyohara 274 1.36 kiyohara /* ARGSUSED */ 275 1.36 kiyohara int 276 1.36 kiyohara gfec_search(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 277 1.36 kiyohara { 278 1.36 kiyohara struct marvell_attach_args *gfea = aux; 279 1.36 kiyohara 280 1.36 kiyohara if (cf->cf_loc[GFECCF_PORT] == gfea->mva_unit && 281 1.36 kiyohara cf->cf_loc[GFECCF_IRQ] != GFECCF_IRQ_DEFAULT) 282 1.36 kiyohara gfea->mva_irq = cf->cf_loc[GFECCF_IRQ]; 283 1.36 kiyohara 284 1.36 kiyohara return config_match(parent, cf, aux); 285 1.36 kiyohara } 286 1.36 kiyohara 287 1.36 kiyohara int 288 1.36 kiyohara gfec_enet_phy(device_t dev, int unit) 289 1.36 kiyohara { 290 1.36 kiyohara struct gfec_softc *sc = device_private(dev); 291 1.36 kiyohara uint32_t epar; 292 1.36 kiyohara 293 1.36 kiyohara epar = bus_space_read_4(sc->sc_iot, sc->sc_ioh, ETH_EPAR); 294 1.36 kiyohara return ETH_EPAR_PhyAD_GET(epar, unit); 295 1.36 kiyohara } 296 1.36 kiyohara 297 1.36 kiyohara int 298 1.50 msaitoh gfec_mii_read(device_t dev, int phy, int reg, uint16_t *val) 299 1.36 kiyohara { 300 1.36 kiyohara struct gfec_softc *csc = device_private(device_parent(dev)); 301 1.36 kiyohara uint32_t data; 302 1.36 kiyohara int count = 10000; 303 1.36 kiyohara 304 1.36 kiyohara mutex_enter(&csc->sc_mtx); 305 1.36 kiyohara 306 1.36 kiyohara do { 307 1.36 kiyohara DELAY(10); 308 1.36 kiyohara data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 309 1.36 kiyohara } while ((data & ETH_ESMIR_Busy) && count-- > 0); 310 1.36 kiyohara 311 1.36 kiyohara if (count == 0) { 312 1.36 kiyohara aprint_error_dev(dev, 313 1.36 kiyohara "mii read for phy %d reg %d busied out\n", phy, reg); 314 1.36 kiyohara mutex_exit(&csc->sc_mtx); 315 1.50 msaitoh return ETIMEDOUT; 316 1.36 kiyohara } 317 1.36 kiyohara 318 1.36 kiyohara bus_space_write_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR, 319 1.36 kiyohara ETH_ESMIR_READ(phy, reg)); 320 1.36 kiyohara 321 1.36 kiyohara count = 10000; 322 1.36 kiyohara do { 323 1.36 kiyohara DELAY(10); 324 1.36 kiyohara data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 325 1.36 kiyohara } while ((data & ETH_ESMIR_ReadValid) == 0 && count-- > 0); 326 1.36 kiyohara 327 1.36 kiyohara mutex_exit(&csc->sc_mtx); 328 1.36 kiyohara 329 1.50 msaitoh if (count == 0) { 330 1.36 kiyohara aprint_error_dev(dev, 331 1.36 kiyohara "mii read for phy %d reg %d timed out\n", phy, reg); 332 1.50 msaitoh return ETIMEDOUT; 333 1.50 msaitoh } 334 1.36 kiyohara #if defined(GTMIIDEBUG) 335 1.36 kiyohara aprint_normal_dev(dev, "mii_read(%d, %d): %#x data %#x\n", 336 1.36 kiyohara phy, reg, data, ETH_ESMIR_Value_GET(data)); 337 1.36 kiyohara #endif 338 1.50 msaitoh *val = ETH_ESMIR_Value_GET(data); 339 1.50 msaitoh return 0; 340 1.36 kiyohara } 341 1.1 matt 342 1.50 msaitoh int 343 1.50 msaitoh gfec_mii_write(device_t dev, int phy, int reg, uint16_t value) 344 1.36 kiyohara { 345 1.36 kiyohara struct gfec_softc *csc = device_private(device_parent(dev)); 346 1.36 kiyohara uint32_t data; 347 1.36 kiyohara int count = 10000; 348 1.36 kiyohara 349 1.36 kiyohara mutex_enter(&csc->sc_mtx); 350 1.36 kiyohara 351 1.36 kiyohara do { 352 1.36 kiyohara DELAY(10); 353 1.36 kiyohara data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 354 1.36 kiyohara } while ((data & ETH_ESMIR_Busy) && count-- > 0); 355 1.36 kiyohara 356 1.36 kiyohara if (count == 0) { 357 1.36 kiyohara aprint_error_dev(dev, 358 1.36 kiyohara "mii write for phy %d reg %d busied out (busy)\n", 359 1.36 kiyohara phy, reg); 360 1.36 kiyohara mutex_exit(&csc->sc_mtx); 361 1.50 msaitoh return ETIMEDOUT; 362 1.36 kiyohara } 363 1.36 kiyohara 364 1.36 kiyohara bus_space_write_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR, 365 1.36 kiyohara ETH_ESMIR_WRITE(phy, reg, value)); 366 1.36 kiyohara 367 1.36 kiyohara count = 10000; 368 1.36 kiyohara do { 369 1.36 kiyohara DELAY(10); 370 1.36 kiyohara data = bus_space_read_4(csc->sc_iot, csc->sc_ioh, ETH_ESMIR); 371 1.36 kiyohara } while ((data & ETH_ESMIR_Busy) && count-- > 0); 372 1.36 kiyohara 373 1.36 kiyohara mutex_exit(&csc->sc_mtx); 374 1.36 kiyohara 375 1.50 msaitoh if (count == 0) { 376 1.36 kiyohara aprint_error_dev(dev, 377 1.36 kiyohara "mii write for phy %d reg %d timed out\n", phy, reg); 378 1.50 msaitoh return ETIMEDOUT; 379 1.50 msaitoh } 380 1.36 kiyohara #if defined(GTMIIDEBUG) 381 1.50 msaitoh aprint_normal_dev(dev, "mii_write(%d, %d, %#hx)\n", phy, reg, value); 382 1.36 kiyohara #endif 383 1.50 msaitoh return 0; 384 1.36 kiyohara } 385 1.36 kiyohara 386 1.36 kiyohara void 387 1.41 matt gfec_mii_statchg(struct ifnet *ifp) 388 1.36 kiyohara { 389 1.41 matt /* struct gfe_softc *sc = ifp->if_softc; */ 390 1.36 kiyohara /* do nothing? */ 391 1.36 kiyohara } 392 1.36 kiyohara 393 1.36 kiyohara /* ARGSUSED */ 394 1.36 kiyohara int 395 1.36 kiyohara gfe_match(device_t parent, cfdata_t cf, void *aux) 396 1.36 kiyohara { 397 1.1 matt 398 1.1 matt return 1; 399 1.16 perry } 400 1.1 matt 401 1.36 kiyohara /* ARGSUSED */ 402 1.1 matt void 403 1.33 cegger gfe_attach(device_t parent, device_t self, void *aux) 404 1.1 matt { 405 1.36 kiyohara struct marvell_attach_args *mva = aux; 406 1.20 thorpej struct gfe_softc * const sc = device_private(self); 407 1.5 matt struct ifnet * const ifp = &sc->sc_ec.ec_if; 408 1.52 msaitoh struct mii_data * const mii = &sc->sc_mii; 409 1.36 kiyohara uint32_t sdcr; 410 1.36 kiyohara int phyaddr, error; 411 1.36 kiyohara prop_data_t ea; 412 1.1 matt uint8_t enaddr[6]; 413 1.1 matt 414 1.36 kiyohara aprint_naive("\n"); 415 1.36 kiyohara aprint_normal(": Ethernet Controller\n"); 416 1.2 matt 417 1.36 kiyohara if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 418 1.36 kiyohara mva->mva_offset, mva->mva_size, &sc->sc_memh)) { 419 1.36 kiyohara aprint_error_dev(self, "failed to map registers\n"); 420 1.36 kiyohara return; 421 1.3 matt } 422 1.36 kiyohara sc->sc_dev = self; 423 1.36 kiyohara sc->sc_memt = mva->mva_iot; 424 1.36 kiyohara sc->sc_dmat = mva->mva_dmat; 425 1.36 kiyohara sc->sc_macno = (mva->mva_offset == ETH_BASE(0)) ? 0 : 426 1.36 kiyohara ((mva->mva_offset == ETH_BASE(1)) ? 1 : 2); 427 1.1 matt 428 1.23 ad callout_init(&sc->sc_co, 0); 429 1.1 matt 430 1.36 kiyohara phyaddr = gfec_enet_phy(parent, sc->sc_macno); 431 1.1 matt 432 1.36 kiyohara ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr"); 433 1.36 kiyohara if (ea != NULL) { 434 1.36 kiyohara KASSERT(prop_object_type(ea) == PROP_TYPE_DATA); 435 1.36 kiyohara KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN); 436 1.36 kiyohara memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN); 437 1.36 kiyohara } 438 1.1 matt 439 1.36 kiyohara sc->sc_pcr = GE_READ(sc, ETH_EPCR); 440 1.36 kiyohara sc->sc_pcxr = GE_READ(sc, ETH_EPCXR); 441 1.36 kiyohara sc->sc_intrmask = GE_READ(sc, ETH_EIMR) | ETH_IR_MIIPhySTC; 442 1.1 matt 443 1.36 kiyohara aprint_normal_dev(self, "Ethernet address %s\n", ether_sprintf(enaddr)); 444 1.1 matt 445 1.1 matt #if defined(DEBUG) 446 1.36 kiyohara printf("pcr %#x, pcxr %#x\n", sc->sc_pcr, sc->sc_pcxr); 447 1.1 matt #endif 448 1.1 matt 449 1.1 matt sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override; 450 1.36 kiyohara if (device_cfdata(self)->cf_flags & 1) { 451 1.36 kiyohara aprint_normal_dev(self, "phy %d (rmii)\n", phyaddr); 452 1.2 matt sc->sc_pcxr |= ETH_EPCXR_RMIIEn; 453 1.2 matt } else { 454 1.36 kiyohara aprint_normal_dev(self, "phy %d (mii)\n", phyaddr); 455 1.2 matt sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn; 456 1.2 matt } 457 1.36 kiyohara if (device_cfdata(self)->cf_flags & 2) 458 1.15 matt sc->sc_flags |= GE_NOFREE; 459 1.36 kiyohara /* Set Max Frame Length is 1536 */ 460 1.36 kiyohara sc->sc_pcxr &= ~ETH_EPCXR_MFL_SET(ETH_EPCXR_MFL_MASK); 461 1.36 kiyohara sc->sc_pcxr |= ETH_EPCXR_MFL_SET(ETH_EPCXR_MFL_1536); 462 1.36 kiyohara sc->sc_max_frame_length = 1536; 463 1.1 matt 464 1.1 matt if (sc->sc_pcr & ETH_EPCR_EN) { 465 1.1 matt int tries = 1000; 466 1.1 matt /* 467 1.1 matt * Abort transmitter and receiver and wait for them to quiese 468 1.1 matt */ 469 1.36 kiyohara GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_AR | ETH_ESDCMR_AT); 470 1.1 matt do { 471 1.1 matt delay(100); 472 1.36 kiyohara if (tries-- <= 0) { 473 1.36 kiyohara aprint_error_dev(self, "Abort TX/RX failed\n"); 474 1.36 kiyohara break; 475 1.36 kiyohara } 476 1.36 kiyohara } while (GE_READ(sc, ETH_ESDCMR) & 477 1.36 kiyohara (ETH_ESDCMR_AR | ETH_ESDCMR_AT)); 478 1.1 matt } 479 1.1 matt 480 1.36 kiyohara sc->sc_pcr &= 481 1.36 kiyohara ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF); 482 1.1 matt 483 1.1 matt #if defined(DEBUG) 484 1.36 kiyohara printf("pcr %#x, pcxr %#x\n", sc->sc_pcr, sc->sc_pcxr); 485 1.1 matt #endif 486 1.1 matt 487 1.1 matt /* 488 1.1 matt * Now turn off the GT. If it didn't quiese, too ***ing bad. 489 1.1 matt */ 490 1.36 kiyohara GE_WRITE(sc, ETH_EPCR, sc->sc_pcr); 491 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 492 1.36 kiyohara sdcr = GE_READ(sc, ETH_ESDCR); 493 1.1 matt ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4); 494 1.1 matt sdcr |= ETH_ESDCR_RIFB; 495 1.36 kiyohara GE_WRITE(sc, ETH_ESDCR, sdcr); 496 1.1 matt 497 1.52 msaitoh mii->mii_ifp = ifp; 498 1.52 msaitoh mii->mii_readreg = gfec_mii_read; 499 1.52 msaitoh mii->mii_writereg = gfec_mii_write; 500 1.52 msaitoh mii->mii_statchg = gfec_mii_statchg; 501 1.1 matt 502 1.52 msaitoh sc->sc_ec.ec_mii = mii; 503 1.52 msaitoh ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 504 1.52 msaitoh 505 1.52 msaitoh mii_attach(sc->sc_dev, mii, 0xffffffff, phyaddr, 506 1.1 matt MII_OFFSET_ANY, MIIF_NOISOLATE); 507 1.52 msaitoh if (LIST_FIRST(&mii->mii_phys) == NULL) { 508 1.52 msaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 509 1.52 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 510 1.52 msaitoh } else 511 1.52 msaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 512 1.1 matt 513 1.36 kiyohara strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 514 1.1 matt ifp->if_softc = sc; 515 1.1 matt ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 516 1.1 matt #if 0 517 1.1 matt ifp->if_flags |= IFF_DEBUG; 518 1.1 matt #endif 519 1.1 matt ifp->if_ioctl = gfe_ifioctl; 520 1.1 matt ifp->if_start = gfe_ifstart; 521 1.1 matt ifp->if_watchdog = gfe_ifwatchdog; 522 1.1 matt 523 1.15 matt if (sc->sc_flags & GE_NOFREE) { 524 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI); 525 1.15 matt if (!error) 526 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI); 527 1.15 matt if (!error) 528 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO); 529 1.15 matt if (!error) 530 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO); 531 1.15 matt if (!error) 532 1.15 matt error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI); 533 1.15 matt if (!error) 534 1.15 matt error = gfe_hash_alloc(sc); 535 1.15 matt if (error) 536 1.36 kiyohara aprint_error_dev(self, 537 1.36 kiyohara "failed to allocate resources: %d\n", error); 538 1.15 matt } 539 1.15 matt 540 1.1 matt if_attach(ifp); 541 1.1 matt ether_ifattach(ifp, enaddr); 542 1.35 joerg bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 543 1.36 kiyohara rnd_attach_source(&sc->sc_rnd_source, device_xname(self), RND_TYPE_NET, 544 1.42 tls RND_FLAG_DEFAULT); 545 1.36 kiyohara marvell_intr_establish(mva->mva_irq, IPL_NET, gfe_intr, sc); 546 1.1 matt } 547 1.1 matt 548 1.1 matt int 549 1.1 matt gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, 550 1.2 matt size_t size, int flags) 551 1.1 matt { 552 1.1 matt int error = 0; 553 1.1 matt GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); 554 1.15 matt 555 1.15 matt KASSERT(gdm->gdm_kva == NULL); 556 1.1 matt gdm->gdm_size = size; 557 1.1 matt gdm->gdm_maxsegs = maxsegs; 558 1.1 matt 559 1.7 thorpej error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, 560 1.1 matt gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, 561 1.1 matt BUS_DMA_NOWAIT); 562 1.1 matt if (error) 563 1.1 matt goto fail; 564 1.1 matt 565 1.1 matt error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, 566 1.2 matt gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); 567 1.1 matt if (error) 568 1.1 matt goto fail; 569 1.1 matt 570 1.1 matt error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, 571 1.52 msaitoh gdm->gdm_size, 0, BUS_DMA_ALLOCNOW |BUS_DMA_NOWAIT, &gdm->gdm_map); 572 1.1 matt if (error) 573 1.1 matt goto fail; 574 1.1 matt 575 1.1 matt error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, 576 1.1 matt gdm->gdm_size, NULL, BUS_DMA_NOWAIT); 577 1.2 matt if (error) 578 1.2 matt goto fail; 579 1.1 matt 580 1.2 matt /* invalidate from cache */ 581 1.2 matt bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, 582 1.2 matt BUS_DMASYNC_PREREAD); 583 1.1 matt fail: 584 1.1 matt if (error) { 585 1.1 matt gfe_dmamem_free(sc, gdm); 586 1.1 matt GE_DPRINTF(sc, (":err=%d", error)); 587 1.1 matt } 588 1.2 matt GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x", 589 1.2 matt gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, 590 1.2 matt gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); 591 1.1 matt GE_FUNC_EXIT(sc, ""); 592 1.1 matt return error; 593 1.1 matt } 594 1.1 matt 595 1.1 matt void 596 1.1 matt gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm) 597 1.1 matt { 598 1.1 matt GE_FUNC_ENTER(sc, "gfe_dmamem_free"); 599 1.1 matt if (gdm->gdm_map) 600 1.1 matt bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map); 601 1.1 matt if (gdm->gdm_kva) 602 1.1 matt bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size); 603 1.1 matt if (gdm->gdm_nsegs > 0) 604 1.1 matt bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs); 605 1.1 matt gdm->gdm_map = NULL; 606 1.1 matt gdm->gdm_kva = NULL; 607 1.1 matt gdm->gdm_nsegs = 0; 608 1.1 matt GE_FUNC_EXIT(sc, ""); 609 1.1 matt } 610 1.1 matt 611 1.1 matt int 612 1.21 christos gfe_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 613 1.1 matt { 614 1.1 matt struct gfe_softc * const sc = ifp->if_softc; 615 1.1 matt struct ifreq *ifr = (struct ifreq *) data; 616 1.1 matt struct ifaddr *ifa = (struct ifaddr *) data; 617 1.1 matt int s, error = 0; 618 1.1 matt 619 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifioctl"); 620 1.1 matt s = splnet(); 621 1.1 matt 622 1.1 matt switch (cmd) { 623 1.31 dyoung case SIOCINITIFADDR: 624 1.1 matt ifp->if_flags |= IFF_UP; 625 1.31 dyoung error = gfe_whack(sc, GE_WHACK_START); 626 1.1 matt switch (ifa->ifa_addr->sa_family) { 627 1.1 matt #ifdef INET 628 1.1 matt case AF_INET: 629 1.1 matt if (error == 0) 630 1.1 matt arp_ifinit(ifp, ifa); 631 1.1 matt break; 632 1.1 matt #endif 633 1.1 matt default: 634 1.1 matt break; 635 1.1 matt } 636 1.1 matt break; 637 1.1 matt 638 1.1 matt case SIOCSIFFLAGS: 639 1.31 dyoung if ((error = ifioctl_common(ifp, cmd, data)) != 0) 640 1.31 dyoung break; 641 1.31 dyoung /* XXX re-use ether_ioctl() */ 642 1.52 msaitoh switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) { 643 1.52 msaitoh case IFF_UP | IFF_RUNNING:/* active->active, update */ 644 1.1 matt error = gfe_whack(sc, GE_WHACK_CHANGE); 645 1.1 matt break; 646 1.1 matt case IFF_RUNNING: /* not up, so we stop */ 647 1.1 matt error = gfe_whack(sc, GE_WHACK_STOP); 648 1.1 matt break; 649 1.1 matt case IFF_UP: /* not running, so we start */ 650 1.1 matt error = gfe_whack(sc, GE_WHACK_START); 651 1.1 matt break; 652 1.1 matt case 0: /* idle->idle: do nothing */ 653 1.1 matt break; 654 1.1 matt } 655 1.1 matt break; 656 1.1 matt 657 1.1 matt case SIOCSIFMTU: 658 1.1 matt if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { 659 1.1 matt error = EINVAL; 660 1.1 matt break; 661 1.1 matt } 662 1.28 dyoung if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET) 663 1.28 dyoung error = 0; 664 1.1 matt break; 665 1.1 matt 666 1.1 matt default: 667 1.51 msaitoh if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 668 1.51 msaitoh if (ifp->if_flags & IFF_RUNNING) 669 1.51 msaitoh error = gfe_whack(sc, GE_WHACK_CHANGE); 670 1.51 msaitoh else 671 1.51 msaitoh error = 0; 672 1.51 msaitoh } 673 1.1 matt break; 674 1.1 matt } 675 1.1 matt splx(s); 676 1.1 matt GE_FUNC_EXIT(sc, ""); 677 1.1 matt return error; 678 1.1 matt } 679 1.1 matt 680 1.1 matt void 681 1.1 matt gfe_ifstart(struct ifnet *ifp) 682 1.1 matt { 683 1.1 matt struct gfe_softc * const sc = ifp->if_softc; 684 1.1 matt struct mbuf *m; 685 1.1 matt 686 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifstart"); 687 1.1 matt 688 1.1 matt if ((ifp->if_flags & IFF_RUNNING) == 0) { 689 1.1 matt GE_FUNC_EXIT(sc, "$"); 690 1.1 matt return; 691 1.1 matt } 692 1.1 matt 693 1.1 matt for (;;) { 694 1.60 thorpej IF_POLL(&ifp->if_snd, m); 695 1.1 matt if (m == NULL) { 696 1.1 matt ifp->if_flags &= ~IFF_OACTIVE; 697 1.1 matt GE_FUNC_EXIT(sc, ""); 698 1.1 matt return; 699 1.1 matt } 700 1.1 matt 701 1.1 matt /* 702 1.1 matt * No space in the pending queue? try later. 703 1.1 matt */ 704 1.15 matt if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq)) 705 1.1 matt break; 706 1.1 matt 707 1.60 thorpej IF_DEQUEUE(&ifp->if_snd, m); 708 1.60 thorpej 709 1.1 matt /* 710 1.1 matt * Try to enqueue a mbuf to the device. If that fails, we 711 1.1 matt * can always try to map the next mbuf. 712 1.1 matt */ 713 1.15 matt IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m); 714 1.1 matt GE_DPRINTF(sc, (">")); 715 1.1 matt #ifndef GE_NOTX 716 1.1 matt (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI); 717 1.1 matt #endif 718 1.1 matt } 719 1.1 matt 720 1.1 matt /* 721 1.1 matt * Attempt to queue the mbuf for send failed. 722 1.1 matt */ 723 1.1 matt ifp->if_flags |= IFF_OACTIVE; 724 1.1 matt GE_FUNC_EXIT(sc, "%%"); 725 1.1 matt } 726 1.1 matt 727 1.1 matt void 728 1.1 matt gfe_ifwatchdog(struct ifnet *ifp) 729 1.1 matt { 730 1.1 matt struct gfe_softc * const sc = ifp->if_softc; 731 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI]; 732 1.1 matt 733 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifwatchdog"); 734 1.36 kiyohara aprint_error_dev(sc->sc_dev, "device timeout"); 735 1.15 matt if (ifp->if_flags & IFF_RUNNING) { 736 1.36 kiyohara uint32_t curtxdnum; 737 1.36 kiyohara 738 1.36 kiyohara curtxdnum = (GE_READ(sc, txq->txq_ectdp) - 739 1.36 kiyohara txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]); 740 1.6 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 741 1.6 matt GE_TXDPOSTSYNC(sc, txq, curtxdnum); 742 1.36 kiyohara aprint_error(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ", 743 1.6 matt txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts, 744 1.6 matt txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts, 745 1.36 kiyohara GE_READ(sc, ETH_EICR)); 746 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi); 747 1.6 matt GE_TXDPRESYNC(sc, txq, curtxdnum); 748 1.1 matt } 749 1.36 kiyohara aprint_error("\n"); 750 1.55 skrll if_statinc(ifp, if_oerrors); 751 1.1 matt (void) gfe_whack(sc, GE_WHACK_RESTART); 752 1.1 matt GE_FUNC_EXIT(sc, ""); 753 1.1 matt } 754 1.36 kiyohara 755 1.1 matt int 756 1.1 matt gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio) 757 1.1 matt { 758 1.15 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 759 1.1 matt int error; 760 1.1 matt 761 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc"); 762 1.2 matt GE_DPRINTF(sc, ("(%d)", rxprio)); 763 1.1 matt 764 1.2 matt error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1, 765 1.5 matt GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE); 766 1.1 matt if (error) { 767 1.1 matt GE_FUNC_EXIT(sc, "!!"); 768 1.1 matt return error; 769 1.1 matt } 770 1.15 matt 771 1.1 matt error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS, 772 1.2 matt GE_RXBUF_MEMSIZE, 0); 773 1.1 matt if (error) { 774 1.1 matt GE_FUNC_EXIT(sc, "!!!"); 775 1.1 matt return error; 776 1.1 matt } 777 1.15 matt GE_FUNC_EXIT(sc, ""); 778 1.15 matt return error; 779 1.15 matt } 780 1.1 matt 781 1.15 matt int 782 1.15 matt gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio) 783 1.15 matt { 784 1.15 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 785 1.15 matt volatile struct gt_eth_desc *rxd; 786 1.15 matt const bus_dma_segment_t *ds; 787 1.15 matt int idx; 788 1.15 matt bus_addr_t nxtaddr; 789 1.15 matt bus_size_t boff; 790 1.15 matt 791 1.15 matt GE_FUNC_ENTER(sc, "gfe_rx_rxqinit"); 792 1.15 matt GE_DPRINTF(sc, ("(%d)", rxprio)); 793 1.15 matt 794 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) { 795 1.15 matt int error = gfe_rx_rxqalloc(sc, rxprio); 796 1.15 matt if (error) { 797 1.15 matt GE_FUNC_EXIT(sc, "!"); 798 1.15 matt return error; 799 1.15 matt } 800 1.15 matt } else { 801 1.15 matt KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL); 802 1.15 matt KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL); 803 1.15 matt } 804 1.15 matt 805 1.15 matt memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE); 806 1.1 matt 807 1.1 matt rxq->rxq_descs = 808 1.1 matt (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva; 809 1.1 matt rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr; 810 1.1 matt rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva; 811 1.1 matt rxq->rxq_fi = 0; 812 1.1 matt rxq->rxq_active = GE_RXDESC_MAX; 813 1.36 kiyohara boff = 0; 814 1.36 kiyohara ds = rxq->rxq_buf_mem.gdm_map->dm_segs; 815 1.36 kiyohara nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd); 816 1.36 kiyohara for (idx = 0, rxd = rxq->rxq_descs; idx < GE_RXDESC_MAX; 817 1.44 joerg idx++, rxd++, nxtaddr += sizeof(*rxd)) { 818 1.2 matt rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16); 819 1.2 matt rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 820 1.2 matt rxd->ed_bufptr = htogt32(ds->ds_addr + boff); 821 1.1 matt /* 822 1.1 matt * update the nxtptr to point to the next txd. 823 1.1 matt */ 824 1.1 matt if (idx == GE_RXDESC_MAX - 1) 825 1.1 matt nxtaddr = rxq->rxq_desc_busaddr; 826 1.2 matt rxd->ed_nxtptr = htogt32(nxtaddr); 827 1.1 matt boff += GE_RXBUF_SIZE; 828 1.1 matt if (boff == ds->ds_len) { 829 1.1 matt ds++; 830 1.1 matt boff = 0; 831 1.1 matt } 832 1.1 matt } 833 1.1 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0, 834 1.1 matt rxq->rxq_desc_mem.gdm_map->dm_mapsize, 835 1.52 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 836 1.1 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0, 837 1.1 matt rxq->rxq_buf_mem.gdm_map->dm_mapsize, 838 1.2 matt BUS_DMASYNC_PREREAD); 839 1.1 matt 840 1.52 msaitoh rxq->rxq_intrbits = ETH_IR_RxBuffer | ETH_IR_RxError; 841 1.1 matt switch (rxprio) { 842 1.1 matt case GE_RXPRIO_HI: 843 1.52 msaitoh rxq->rxq_intrbits |= ETH_IR_RxBuffer_3 | ETH_IR_RxError_3; 844 1.36 kiyohara rxq->rxq_efrdp = ETH_EFRDP3; 845 1.36 kiyohara rxq->rxq_ecrdp = ETH_ECRDP3; 846 1.1 matt break; 847 1.1 matt case GE_RXPRIO_MEDHI: 848 1.52 msaitoh rxq->rxq_intrbits |= ETH_IR_RxBuffer_2 | ETH_IR_RxError_2; 849 1.36 kiyohara rxq->rxq_efrdp = ETH_EFRDP2; 850 1.36 kiyohara rxq->rxq_ecrdp = ETH_ECRDP2; 851 1.1 matt break; 852 1.1 matt case GE_RXPRIO_MEDLO: 853 1.52 msaitoh rxq->rxq_intrbits |= ETH_IR_RxBuffer_1 | ETH_IR_RxError_1; 854 1.36 kiyohara rxq->rxq_efrdp = ETH_EFRDP1; 855 1.36 kiyohara rxq->rxq_ecrdp = ETH_ECRDP1; 856 1.1 matt break; 857 1.1 matt case GE_RXPRIO_LO: 858 1.52 msaitoh rxq->rxq_intrbits |= ETH_IR_RxBuffer_0 | ETH_IR_RxError_0; 859 1.36 kiyohara rxq->rxq_efrdp = ETH_EFRDP0; 860 1.36 kiyohara rxq->rxq_ecrdp = ETH_ECRDP0; 861 1.1 matt break; 862 1.1 matt } 863 1.1 matt GE_FUNC_EXIT(sc, ""); 864 1.15 matt return 0; 865 1.1 matt } 866 1.1 matt 867 1.1 matt void 868 1.1 matt gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio) 869 1.1 matt { 870 1.1 matt struct ifnet * const ifp = &sc->sc_ec.ec_if; 871 1.15 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio]; 872 1.1 matt struct mbuf *m = rxq->rxq_curpkt; 873 1.1 matt 874 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_get"); 875 1.1 matt GE_DPRINTF(sc, ("(%d)", rxprio)); 876 1.1 matt 877 1.1 matt while (rxq->rxq_active > 0) { 878 1.1 matt volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi]; 879 1.1 matt struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi]; 880 1.1 matt const struct ether_header *eh; 881 1.1 matt unsigned int cmdsts; 882 1.1 matt size_t buflen; 883 1.1 matt 884 1.6 matt GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi); 885 1.2 matt cmdsts = gt32toh(rxd->ed_cmdsts); 886 1.1 matt GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts)); 887 1.1 matt rxq->rxq_cmdsts = cmdsts; 888 1.1 matt /* 889 1.1 matt * Sometimes the GE "forgets" to reset the ownership bit. 890 1.1 matt * But if the length has been rewritten, the packet is ours 891 1.1 matt * so pretend the O bit is set. 892 1.1 matt */ 893 1.2 matt buflen = gt32toh(rxd->ed_lencnt) & 0xffff; 894 1.1 matt if ((cmdsts & RX_CMD_O) && buflen == 0) { 895 1.6 matt GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 896 1.1 matt break; 897 1.1 matt } 898 1.1 matt 899 1.1 matt /* 900 1.1 matt * If this is not a single buffer packet with no errors 901 1.1 matt * or for some reason it's bigger than our frame size, 902 1.1 matt * ignore it and go to the next packet. 903 1.1 matt */ 904 1.52 msaitoh if ((cmdsts & (RX_CMD_F | RX_CMD_L | RX_STS_ES)) != 905 1.52 msaitoh (RX_CMD_F | RX_CMD_L) || 906 1.52 msaitoh (buflen > sc->sc_max_frame_length)) { 907 1.1 matt GE_DPRINTF(sc, ("!")); 908 1.1 matt --rxq->rxq_active; 909 1.56 skrll if_statinc(ifp, if_ipackets); 910 1.55 skrll if_statinc(ifp, if_ierrors); 911 1.1 matt goto give_it_back; 912 1.1 matt } 913 1.1 matt 914 1.14 thorpej /* CRC is included with the packet; trim it off. */ 915 1.14 thorpej buflen -= ETHER_CRC_LEN; 916 1.14 thorpej 917 1.1 matt if (m == NULL) { 918 1.1 matt MGETHDR(m, M_DONTWAIT, MT_DATA); 919 1.1 matt if (m == NULL) { 920 1.1 matt GE_DPRINTF(sc, ("?")); 921 1.1 matt break; 922 1.1 matt } 923 1.1 matt } 924 1.1 matt if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) { 925 1.1 matt MCLGET(m, M_DONTWAIT); 926 1.1 matt if ((m->m_flags & M_EXT) == 0) { 927 1.1 matt GE_DPRINTF(sc, ("?")); 928 1.1 matt break; 929 1.1 matt } 930 1.1 matt } 931 1.5 matt m->m_data += 2; 932 1.1 matt m->m_len = 0; 933 1.1 matt m->m_pkthdr.len = 0; 934 1.47 ozaki m_set_rcvif(m, ifp); 935 1.1 matt rxq->rxq_cmdsts = cmdsts; 936 1.1 matt --rxq->rxq_active; 937 1.1 matt 938 1.1 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 939 1.2 matt rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD); 940 1.1 matt 941 1.1 matt KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0); 942 1.30 he memcpy(m->m_data + m->m_len, rxb->rxb_data, buflen); 943 1.1 matt m->m_len = buflen; 944 1.1 matt m->m_pkthdr.len = buflen; 945 1.1 matt 946 1.1 matt eh = (const struct ether_header *) m->m_data; 947 1.1 matt if ((ifp->if_flags & IFF_PROMISC) || 948 1.1 matt (rxq->rxq_cmdsts & RX_STS_M) == 0 || 949 1.1 matt (rxq->rxq_cmdsts & RX_STS_HE) || 950 1.1 matt (eh->ether_dhost[0] & 1) != 0 || 951 1.24 dyoung memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl), 952 1.36 kiyohara ETHER_ADDR_LEN) == 0) { 953 1.46 ozaki if_percpuq_enqueue(ifp->if_percpuq, m); 954 1.1 matt m = NULL; 955 1.1 matt GE_DPRINTF(sc, (">")); 956 1.1 matt } else { 957 1.1 matt m->m_len = 0; 958 1.1 matt m->m_pkthdr.len = 0; 959 1.1 matt GE_DPRINTF(sc, ("+")); 960 1.1 matt } 961 1.1 matt rxq->rxq_cmdsts = 0; 962 1.1 matt 963 1.1 matt give_it_back: 964 1.1 matt rxd->ed_lencnt &= ~0xffff; /* zero out length */ 965 1.2 matt rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI); 966 1.2 matt #if 0 967 1.2 matt GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", 968 1.2 matt rxq->rxq_fi, 969 1.2 matt ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1], 970 1.2 matt ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3])); 971 1.2 matt #endif 972 1.6 matt GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi); 973 1.1 matt if (++rxq->rxq_fi == GE_RXDESC_MAX) 974 1.1 matt rxq->rxq_fi = 0; 975 1.1 matt rxq->rxq_active++; 976 1.1 matt } 977 1.1 matt rxq->rxq_curpkt = m; 978 1.1 matt GE_FUNC_EXIT(sc, ""); 979 1.1 matt } 980 1.1 matt 981 1.1 matt uint32_t 982 1.1 matt gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask) 983 1.1 matt { 984 1.5 matt struct ifnet * const ifp = &sc->sc_ec.ec_if; 985 1.1 matt struct gfe_rxqueue *rxq; 986 1.1 matt uint32_t rxbits; 987 1.1 matt #define RXPRIO_DECODER 0xffffaa50 988 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_process"); 989 1.1 matt 990 1.1 matt rxbits = ETH_IR_RxBuffer_GET(cause); 991 1.1 matt while (rxbits) { 992 1.1 matt enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 993 1.1 matt GE_DPRINTF(sc, ("%1x", rxbits)); 994 1.1 matt rxbits &= ~(1 << rxprio); 995 1.1 matt gfe_rx_get(sc, rxprio); 996 1.1 matt } 997 1.1 matt 998 1.1 matt rxbits = ETH_IR_RxError_GET(cause); 999 1.1 matt while (rxbits) { 1000 1.1 matt enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3; 1001 1.1 matt uint32_t masks[(GE_RXDESC_MAX + 31) / 32]; 1002 1.1 matt int idx; 1003 1.1 matt rxbits &= ~(1 << rxprio); 1004 1.15 matt rxq = &sc->sc_rxq[rxprio]; 1005 1.1 matt sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits); 1006 1.1 matt intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits); 1007 1.1 matt if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) { 1008 1.1 matt sc->sc_tickflags |= GE_TICK_RX_RESTART; 1009 1.1 matt callout_reset(&sc->sc_co, 1, gfe_tick, sc); 1010 1.1 matt } 1011 1.55 skrll if_statinc(ifp, if_ierrors); 1012 1.1 matt GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n", 1013 1.36 kiyohara device_xname(sc->sc_dev), rxprio, rxq->rxq_fi)); 1014 1.1 matt memset(masks, 0, sizeof(masks)); 1015 1.2 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 1016 1.2 matt 0, rxq->rxq_desc_mem.gdm_size, 1017 1.52 msaitoh BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1018 1.1 matt for (idx = 0; idx < GE_RXDESC_MAX; idx++) { 1019 1.1 matt volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx]; 1020 1.1 matt 1021 1.2 matt if (RX_CMD_O & gt32toh(rxd->ed_cmdsts)) 1022 1.1 matt masks[idx/32] |= 1 << (idx & 31); 1023 1.1 matt } 1024 1.2 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 1025 1.2 matt 0, rxq->rxq_desc_mem.gdm_size, 1026 1.52 msaitoh BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1027 1.1 matt #if defined(DEBUG) 1028 1.1 matt printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n", 1029 1.36 kiyohara device_xname(sc->sc_dev), rxprio, rxq->rxq_fi, 1030 1.1 matt rxq->rxq_cmdsts, masks[0], masks[1]); 1031 1.1 matt #endif 1032 1.1 matt } 1033 1.1 matt if ((intrmask & ETH_IR_RxBits) == 0) 1034 1.52 msaitoh intrmask &= ~(ETH_IR_RxBuffer | ETH_IR_RxError); 1035 1.1 matt 1036 1.1 matt GE_FUNC_EXIT(sc, ""); 1037 1.1 matt return intrmask; 1038 1.1 matt } 1039 1.1 matt 1040 1.1 matt int 1041 1.1 matt gfe_rx_prime(struct gfe_softc *sc) 1042 1.1 matt { 1043 1.1 matt struct gfe_rxqueue *rxq; 1044 1.1 matt int error; 1045 1.1 matt 1046 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_prime"); 1047 1.1 matt 1048 1.15 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI); 1049 1.1 matt if (error) 1050 1.1 matt goto bail; 1051 1.15 matt rxq = &sc->sc_rxq[GE_RXPRIO_HI]; 1052 1.1 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) { 1053 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP3, rxq->rxq_desc_busaddr); 1054 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP3, rxq->rxq_desc_busaddr); 1055 1.1 matt } 1056 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits; 1057 1.1 matt 1058 1.15 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI); 1059 1.1 matt if (error) 1060 1.1 matt goto bail; 1061 1.1 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) { 1062 1.15 matt rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI]; 1063 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP2, rxq->rxq_desc_busaddr); 1064 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP2, rxq->rxq_desc_busaddr); 1065 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits; 1066 1.1 matt } 1067 1.1 matt 1068 1.15 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO); 1069 1.1 matt if (error) 1070 1.1 matt goto bail; 1071 1.1 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) { 1072 1.15 matt rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO]; 1073 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP1, rxq->rxq_desc_busaddr); 1074 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP1, rxq->rxq_desc_busaddr); 1075 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits; 1076 1.1 matt } 1077 1.1 matt 1078 1.15 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO); 1079 1.1 matt if (error) 1080 1.1 matt goto bail; 1081 1.1 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) { 1082 1.15 matt rxq = &sc->sc_rxq[GE_RXPRIO_LO]; 1083 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP0, rxq->rxq_desc_busaddr); 1084 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP0, rxq->rxq_desc_busaddr); 1085 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits; 1086 1.1 matt } 1087 1.1 matt 1088 1.1 matt bail: 1089 1.1 matt GE_FUNC_EXIT(sc, ""); 1090 1.1 matt return error; 1091 1.1 matt } 1092 1.1 matt 1093 1.1 matt void 1094 1.1 matt gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio) 1095 1.1 matt { 1096 1.15 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio]; 1097 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_cleanup"); 1098 1.1 matt if (rxq == NULL) { 1099 1.1 matt GE_FUNC_EXIT(sc, ""); 1100 1.1 matt return; 1101 1.1 matt } 1102 1.1 matt 1103 1.61 rin m_freem(rxq->rxq_curpkt); 1104 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) { 1105 1.15 matt gfe_dmamem_free(sc, &rxq->rxq_desc_mem); 1106 1.15 matt gfe_dmamem_free(sc, &rxq->rxq_buf_mem); 1107 1.15 matt } 1108 1.1 matt GE_FUNC_EXIT(sc, ""); 1109 1.1 matt } 1110 1.1 matt 1111 1.1 matt void 1112 1.1 matt gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 1113 1.1 matt { 1114 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_stop"); 1115 1.1 matt sc->sc_flags &= ~GE_RXACTIVE; 1116 1.52 msaitoh sc->sc_idlemask &= ~(ETH_IR_RxBits | ETH_IR_RxBuffer | ETH_IR_RxError); 1117 1.52 msaitoh sc->sc_intrmask &= ~(ETH_IR_RxBits | ETH_IR_RxBuffer | ETH_IR_RxError); 1118 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1119 1.36 kiyohara GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_AR); 1120 1.1 matt do { 1121 1.1 matt delay(10); 1122 1.36 kiyohara } while (GE_READ(sc, ETH_ESDCMR) & ETH_ESDCMR_AR); 1123 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_HI); 1124 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI); 1125 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO); 1126 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_LO); 1127 1.1 matt GE_FUNC_EXIT(sc, ""); 1128 1.1 matt } 1129 1.36 kiyohara 1130 1.1 matt void 1131 1.1 matt gfe_tick(void *arg) 1132 1.1 matt { 1133 1.1 matt struct gfe_softc * const sc = arg; 1134 1.1 matt uint32_t intrmask; 1135 1.1 matt unsigned int tickflags; 1136 1.1 matt int s; 1137 1.1 matt 1138 1.1 matt GE_FUNC_ENTER(sc, "gfe_tick"); 1139 1.1 matt 1140 1.1 matt s = splnet(); 1141 1.1 matt 1142 1.1 matt tickflags = sc->sc_tickflags; 1143 1.1 matt sc->sc_tickflags = 0; 1144 1.1 matt intrmask = sc->sc_intrmask; 1145 1.1 matt if (tickflags & GE_TICK_TX_IFSTART) 1146 1.1 matt gfe_ifstart(&sc->sc_ec.ec_if); 1147 1.1 matt if (tickflags & GE_TICK_RX_RESTART) { 1148 1.1 matt intrmask |= sc->sc_idlemask; 1149 1.52 msaitoh if (sc->sc_idlemask & (ETH_IR_RxBuffer_3 | ETH_IR_RxError_3)) { 1150 1.15 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI]; 1151 1.1 matt rxq->rxq_fi = 0; 1152 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP3, rxq->rxq_desc_busaddr); 1153 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP3, rxq->rxq_desc_busaddr); 1154 1.1 matt } 1155 1.52 msaitoh if (sc->sc_idlemask & (ETH_IR_RxBuffer_2 | ETH_IR_RxError_2)) { 1156 1.15 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI]; 1157 1.1 matt rxq->rxq_fi = 0; 1158 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP2, rxq->rxq_desc_busaddr); 1159 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP2, rxq->rxq_desc_busaddr); 1160 1.1 matt } 1161 1.52 msaitoh if (sc->sc_idlemask & (ETH_IR_RxBuffer_1 | ETH_IR_RxError_1)) { 1162 1.15 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO]; 1163 1.1 matt rxq->rxq_fi = 0; 1164 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP1, rxq->rxq_desc_busaddr); 1165 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP1, rxq->rxq_desc_busaddr); 1166 1.1 matt } 1167 1.52 msaitoh if (sc->sc_idlemask & (ETH_IR_RxBuffer_0 | ETH_IR_RxError_0)) { 1168 1.15 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO]; 1169 1.1 matt rxq->rxq_fi = 0; 1170 1.36 kiyohara GE_WRITE(sc, ETH_EFRDP0, rxq->rxq_desc_busaddr); 1171 1.36 kiyohara GE_WRITE(sc, ETH_ECRDP0, rxq->rxq_desc_busaddr); 1172 1.1 matt } 1173 1.1 matt sc->sc_idlemask = 0; 1174 1.1 matt } 1175 1.1 matt if (intrmask != sc->sc_intrmask) { 1176 1.1 matt sc->sc_intrmask = intrmask; 1177 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1178 1.1 matt } 1179 1.1 matt gfe_intr(sc); 1180 1.1 matt splx(s); 1181 1.1 matt 1182 1.1 matt GE_FUNC_EXIT(sc, ""); 1183 1.1 matt } 1184 1.1 matt 1185 1.1 matt int 1186 1.1 matt gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio) 1187 1.1 matt { 1188 1.5 matt const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 1189 1.5 matt struct ifnet * const ifp = &sc->sc_ec.ec_if; 1190 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1191 1.1 matt volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo]; 1192 1.1 matt uint32_t intrmask = sc->sc_intrmask; 1193 1.9 matt size_t buflen; 1194 1.1 matt struct mbuf *m; 1195 1.1 matt 1196 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_enqueue"); 1197 1.1 matt 1198 1.1 matt /* 1199 1.13 scw * Anything in the pending queue to enqueue? if not, punt. Likewise 1200 1.13 scw * if the txq is not yet created. 1201 1.1 matt * otherwise grab its dmamap. 1202 1.1 matt */ 1203 1.13 scw if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) { 1204 1.1 matt GE_FUNC_EXIT(sc, "-"); 1205 1.1 matt return 0; 1206 1.1 matt } 1207 1.1 matt 1208 1.1 matt /* 1209 1.1 matt * Have we [over]consumed our limit of descriptors? 1210 1.1 matt * Do we have enough free descriptors? 1211 1.1 matt */ 1212 1.6 matt if (GE_TXDESC_MAX == txq->txq_nactive + 2) { 1213 1.1 matt volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi]; 1214 1.1 matt uint32_t cmdsts; 1215 1.1 matt size_t pktlen; 1216 1.6 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1217 1.2 matt cmdsts = gt32toh(txd2->ed_cmdsts); 1218 1.1 matt if (cmdsts & TX_CMD_O) { 1219 1.6 matt int nextin; 1220 1.6 matt /* 1221 1.6 matt * Sometime the Discovery forgets to update the 1222 1.6 matt * last descriptor. See if we own the descriptor 1223 1.6 matt * after it (since we know we've turned that to 1224 1.6 matt * the discovery and if we owned it, the Discovery 1225 1.6 matt * gave it back). If we do, we know the Discovery 1226 1.6 matt * gave back this one but forgot to mark it as ours. 1227 1.6 matt */ 1228 1.6 matt nextin = txq->txq_fi + 1; 1229 1.6 matt if (nextin == GE_TXDESC_MAX) 1230 1.6 matt nextin = 0; 1231 1.6 matt GE_TXDPOSTSYNC(sc, txq, nextin); 1232 1.6 matt if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1233 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1234 1.6 matt GE_TXDPRESYNC(sc, txq, nextin); 1235 1.6 matt GE_FUNC_EXIT(sc, "@"); 1236 1.6 matt return 0; 1237 1.6 matt } 1238 1.6 matt #ifdef DEBUG 1239 1.6 matt printf("%s: txenqueue: transmitter resynced at %d\n", 1240 1.36 kiyohara device_xname(sc->sc_dev), txq->txq_fi); 1241 1.6 matt #endif 1242 1.1 matt } 1243 1.1 matt if (++txq->txq_fi == GE_TXDESC_MAX) 1244 1.1 matt txq->txq_fi = 0; 1245 1.2 matt txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr; 1246 1.2 matt pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff; 1247 1.5 matt txq->txq_inptr += roundup(pktlen, dcache_line_size); 1248 1.1 matt txq->txq_nactive--; 1249 1.1 matt 1250 1.1 matt /* statistics */ 1251 1.55 skrll if_statinc(ifp, if_opackets); 1252 1.1 matt if (cmdsts & TX_STS_ES) 1253 1.55 skrll if_statinc(ifp, if_oerrors); 1254 1.1 matt GE_DPRINTF(sc, ("%%")); 1255 1.1 matt } 1256 1.1 matt 1257 1.9 matt buflen = roundup(m->m_pkthdr.len, dcache_line_size); 1258 1.9 matt 1259 1.1 matt /* 1260 1.1 matt * If this packet would wrap around the end of the buffer, reset back 1261 1.1 matt * to the beginning. 1262 1.1 matt */ 1263 1.9 matt if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) { 1264 1.1 matt txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr; 1265 1.1 matt txq->txq_outptr = 0; 1266 1.1 matt } 1267 1.1 matt 1268 1.1 matt /* 1269 1.1 matt * Make sure the output packet doesn't run over the beginning of 1270 1.1 matt * what we've already given the GT. 1271 1.1 matt */ 1272 1.5 matt if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr && 1273 1.9 matt txq->txq_outptr + buflen > txq->txq_inptr) { 1274 1.1 matt intrmask |= txq->txq_intrbits & 1275 1.52 msaitoh (ETH_IR_TxBufferHigh | ETH_IR_TxBufferLow); 1276 1.1 matt if (sc->sc_intrmask != intrmask) { 1277 1.1 matt sc->sc_intrmask = intrmask; 1278 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1279 1.1 matt } 1280 1.1 matt GE_FUNC_EXIT(sc, "#"); 1281 1.1 matt return 0; 1282 1.1 matt } 1283 1.1 matt 1284 1.16 perry /* 1285 1.1 matt * The end-of-list descriptor we put on last time is the starting point 1286 1.1 matt * for this packet. The GT is supposed to terminate list processing on 1287 1.1 matt * a NULL nxtptr but that currently is broken so a CPU-owned descriptor 1288 1.1 matt * must terminate the list. 1289 1.1 matt */ 1290 1.1 matt intrmask = sc->sc_intrmask; 1291 1.1 matt 1292 1.1 matt m_copydata(m, 0, m->m_pkthdr.len, 1293 1.22 he (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr); 1294 1.1 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1295 1.9 matt txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE); 1296 1.2 matt txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr); 1297 1.2 matt txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16); 1298 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1299 1.2 matt 1300 1.1 matt /* 1301 1.1 matt * Request a buffer interrupt every 2/3 of the way thru the transmit 1302 1.1 matt * buffer. 1303 1.1 matt */ 1304 1.9 matt txq->txq_ei_gapcount += buflen; 1305 1.1 matt if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) { 1306 1.52 msaitoh txd->ed_cmdsts = htogt32(TX_CMD_FIRST |TX_CMD_LAST |TX_CMD_EI); 1307 1.1 matt txq->txq_ei_gapcount = 0; 1308 1.1 matt } else { 1309 1.52 msaitoh txd->ed_cmdsts = htogt32(TX_CMD_FIRST | TX_CMD_LAST); 1310 1.1 matt } 1311 1.2 matt #if 0 1312 1.2 matt GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo, 1313 1.2 matt ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1314 1.2 matt ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1315 1.2 matt #endif 1316 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_lo); 1317 1.1 matt 1318 1.9 matt txq->txq_outptr += buflen; 1319 1.1 matt /* 1320 1.1 matt * Tell the SDMA engine to "Fetch!" 1321 1.1 matt */ 1322 1.36 kiyohara GE_WRITE(sc, ETH_ESDCMR, 1323 1.52 msaitoh txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH | ETH_ESDCMR_TXDL)); 1324 1.1 matt 1325 1.1 matt GE_DPRINTF(sc, ("(%d)", txq->txq_lo)); 1326 1.1 matt 1327 1.1 matt /* 1328 1.1 matt * Update the last out appropriately. 1329 1.1 matt */ 1330 1.5 matt txq->txq_nactive++; 1331 1.1 matt if (++txq->txq_lo == GE_TXDESC_MAX) 1332 1.1 matt txq->txq_lo = 0; 1333 1.1 matt 1334 1.1 matt /* 1335 1.1 matt * Move mbuf from the pending queue to the snd queue. 1336 1.1 matt */ 1337 1.1 matt IF_DEQUEUE(&txq->txq_pendq, m); 1338 1.49 msaitoh bpf_mtap(ifp, m, BPF_D_OUT); 1339 1.1 matt m_freem(m); 1340 1.5 matt ifp->if_flags &= ~IFF_OACTIVE; 1341 1.1 matt 1342 1.1 matt /* 1343 1.1 matt * Since we have put an item into the packet queue, we now want 1344 1.1 matt * an interrupt when the transmit queue finishes processing the 1345 1.1 matt * list. But only update the mask if needs changing. 1346 1.1 matt */ 1347 1.52 msaitoh intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh | ETH_IR_TxEndLow); 1348 1.1 matt if (sc->sc_intrmask != intrmask) { 1349 1.1 matt sc->sc_intrmask = intrmask; 1350 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1351 1.1 matt } 1352 1.5 matt if (ifp->if_timer == 0) 1353 1.5 matt ifp->if_timer = 5; 1354 1.1 matt GE_FUNC_EXIT(sc, "*"); 1355 1.1 matt return 1; 1356 1.1 matt } 1357 1.1 matt 1358 1.1 matt uint32_t 1359 1.1 matt gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask) 1360 1.1 matt { 1361 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1362 1.5 matt struct ifnet * const ifp = &sc->sc_ec.ec_if; 1363 1.1 matt 1364 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_done"); 1365 1.1 matt 1366 1.1 matt if (txq == NULL) { 1367 1.1 matt GE_FUNC_EXIT(sc, ""); 1368 1.1 matt return intrmask; 1369 1.1 matt } 1370 1.1 matt 1371 1.1 matt while (txq->txq_nactive > 0) { 1372 1.5 matt const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; 1373 1.2 matt volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi]; 1374 1.1 matt uint32_t cmdsts; 1375 1.1 matt size_t pktlen; 1376 1.1 matt 1377 1.6 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi); 1378 1.2 matt if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) { 1379 1.6 matt int nextin; 1380 1.6 matt 1381 1.6 matt if (txq->txq_nactive == 1) { 1382 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1383 1.6 matt GE_FUNC_EXIT(sc, ""); 1384 1.6 matt return intrmask; 1385 1.6 matt } 1386 1.1 matt /* 1387 1.6 matt * Sometimes the Discovery forgets to update the 1388 1.6 matt * ownership bit in the descriptor. See if we own the 1389 1.6 matt * descriptor after it (since we know we've turned 1390 1.6 matt * that to the Discovery and if we own it now then the 1391 1.6 matt * Discovery gave it back). If we do, we know the 1392 1.6 matt * Discovery gave back this one but forgot to mark it 1393 1.6 matt * as ours. 1394 1.1 matt */ 1395 1.6 matt nextin = txq->txq_fi + 1; 1396 1.6 matt if (nextin == GE_TXDESC_MAX) 1397 1.6 matt nextin = 0; 1398 1.6 matt GE_TXDPOSTSYNC(sc, txq, nextin); 1399 1.6 matt if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { 1400 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi); 1401 1.6 matt GE_TXDPRESYNC(sc, txq, nextin); 1402 1.6 matt GE_FUNC_EXIT(sc, ""); 1403 1.6 matt return intrmask; 1404 1.1 matt } 1405 1.6 matt #ifdef DEBUG 1406 1.6 matt printf("%s: txdone: transmitter resynced at %d\n", 1407 1.36 kiyohara device_xname(sc->sc_dev), txq->txq_fi); 1408 1.1 matt #endif 1409 1.1 matt } 1410 1.2 matt #if 0 1411 1.2 matt GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)", 1412 1.2 matt txq->txq_lo, 1413 1.2 matt ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], 1414 1.2 matt ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); 1415 1.2 matt #endif 1416 1.1 matt GE_DPRINTF(sc, ("(%d)", txq->txq_fi)); 1417 1.1 matt if (++txq->txq_fi == GE_TXDESC_MAX) 1418 1.1 matt txq->txq_fi = 0; 1419 1.2 matt txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr; 1420 1.2 matt pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff; 1421 1.2 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, 1422 1.2 matt txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE); 1423 1.10 matt txq->txq_inptr += roundup(pktlen, dcache_line_size); 1424 1.1 matt 1425 1.1 matt /* statistics */ 1426 1.55 skrll if_statinc(ifp, if_opackets); 1427 1.1 matt if (cmdsts & TX_STS_ES) 1428 1.55 skrll if_statinc(ifp, if_oerrors); 1429 1.1 matt 1430 1.6 matt /* txd->ed_bufptr = 0; */ 1431 1.1 matt 1432 1.5 matt ifp->if_timer = 5; 1433 1.1 matt --txq->txq_nactive; 1434 1.1 matt } 1435 1.1 matt if (txq->txq_nactive != 0) 1436 1.1 matt panic("%s: transmit fifo%d empty but active count (%d) > 0!", 1437 1.36 kiyohara device_xname(sc->sc_dev), txprio, txq->txq_nactive); 1438 1.5 matt ifp->if_timer = 0; 1439 1.52 msaitoh intrmask &= 1440 1.52 msaitoh ~(txq->txq_intrbits & (ETH_IR_TxEndHigh | ETH_IR_TxEndLow)); 1441 1.52 msaitoh intrmask &= 1442 1.52 msaitoh ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh | ETH_IR_TxBufferLow)); 1443 1.1 matt GE_FUNC_EXIT(sc, ""); 1444 1.1 matt return intrmask; 1445 1.1 matt } 1446 1.1 matt 1447 1.1 matt int 1448 1.15 matt gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio) 1449 1.15 matt { 1450 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1451 1.15 matt int error; 1452 1.15 matt 1453 1.15 matt GE_FUNC_ENTER(sc, "gfe_tx_txqalloc"); 1454 1.15 matt 1455 1.15 matt error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1, 1456 1.15 matt GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE); 1457 1.15 matt if (error) { 1458 1.15 matt GE_FUNC_EXIT(sc, ""); 1459 1.15 matt return error; 1460 1.15 matt } 1461 1.15 matt error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0); 1462 1.15 matt if (error) { 1463 1.15 matt gfe_dmamem_free(sc, &txq->txq_desc_mem); 1464 1.15 matt GE_FUNC_EXIT(sc, ""); 1465 1.15 matt return error; 1466 1.15 matt } 1467 1.15 matt GE_FUNC_EXIT(sc, ""); 1468 1.15 matt return 0; 1469 1.15 matt } 1470 1.15 matt 1471 1.15 matt int 1472 1.1 matt gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio) 1473 1.1 matt { 1474 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1475 1.1 matt volatile struct gt_eth_desc *txd; 1476 1.1 matt unsigned int i; 1477 1.1 matt bus_addr_t addr; 1478 1.1 matt 1479 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_start"); 1480 1.1 matt 1481 1.36 kiyohara sc->sc_intrmask &= 1482 1.36 kiyohara ~(ETH_IR_TxEndHigh | 1483 1.36 kiyohara ETH_IR_TxBufferHigh | 1484 1.36 kiyohara ETH_IR_TxEndLow | 1485 1.36 kiyohara ETH_IR_TxBufferLow); 1486 1.1 matt 1487 1.15 matt if (sc->sc_flags & GE_NOFREE) { 1488 1.15 matt KASSERT(txq->txq_desc_mem.gdm_kva != NULL); 1489 1.15 matt KASSERT(txq->txq_buf_mem.gdm_kva != NULL); 1490 1.15 matt } else { 1491 1.15 matt int error = gfe_tx_txqalloc(sc, txprio); 1492 1.1 matt if (error) { 1493 1.15 matt GE_FUNC_EXIT(sc, "!"); 1494 1.1 matt return error; 1495 1.1 matt } 1496 1.1 matt } 1497 1.1 matt 1498 1.1 matt txq->txq_descs = 1499 1.1 matt (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva; 1500 1.1 matt txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr; 1501 1.1 matt txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr; 1502 1.1 matt 1503 1.1 matt txq->txq_pendq.ifq_maxlen = 10; 1504 1.1 matt txq->txq_ei_gapcount = 0; 1505 1.1 matt txq->txq_nactive = 0; 1506 1.1 matt txq->txq_fi = 0; 1507 1.1 matt txq->txq_lo = 0; 1508 1.1 matt txq->txq_inptr = GE_TXBUF_SIZE; 1509 1.1 matt txq->txq_outptr = 0; 1510 1.1 matt for (i = 0, txd = txq->txq_descs, 1511 1.36 kiyohara addr = txq->txq_desc_busaddr + sizeof(*txd); 1512 1.36 kiyohara i < GE_TXDESC_MAX - 1; i++, txd++, addr += sizeof(*txd)) { 1513 1.1 matt /* 1514 1.1 matt * update the nxtptr to point to the next txd. 1515 1.1 matt */ 1516 1.1 matt txd->ed_cmdsts = 0; 1517 1.2 matt txd->ed_nxtptr = htogt32(addr); 1518 1.1 matt } 1519 1.1 matt txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr = 1520 1.2 matt htogt32(txq->txq_desc_busaddr); 1521 1.1 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0, 1522 1.52 msaitoh GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1523 1.1 matt 1524 1.1 matt switch (txprio) { 1525 1.1 matt case GE_TXPRIO_HI: 1526 1.52 msaitoh txq->txq_intrbits = ETH_IR_TxEndHigh | ETH_IR_TxBufferHigh; 1527 1.1 matt txq->txq_esdcmrbits = ETH_ESDCMR_TXDH; 1528 1.1 matt txq->txq_epsrbits = ETH_EPSR_TxHigh; 1529 1.36 kiyohara txq->txq_ectdp = ETH_ECTDP1; 1530 1.36 kiyohara GE_WRITE(sc, ETH_ECTDP1, txq->txq_desc_busaddr); 1531 1.1 matt break; 1532 1.1 matt 1533 1.1 matt case GE_TXPRIO_LO: 1534 1.52 msaitoh txq->txq_intrbits = ETH_IR_TxEndLow | ETH_IR_TxBufferLow; 1535 1.1 matt txq->txq_esdcmrbits = ETH_ESDCMR_TXDL; 1536 1.1 matt txq->txq_epsrbits = ETH_EPSR_TxLow; 1537 1.36 kiyohara txq->txq_ectdp = ETH_ECTDP0; 1538 1.36 kiyohara GE_WRITE(sc, ETH_ECTDP0, txq->txq_desc_busaddr); 1539 1.1 matt break; 1540 1.1 matt 1541 1.1 matt case GE_TXPRIO_NONE: 1542 1.1 matt break; 1543 1.1 matt } 1544 1.1 matt #if 0 1545 1.1 matt GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp)); 1546 1.36 kiyohara GE_WRITE(sc->sc_dev, txq->txq_ectdp, txq->txq_desc_busaddr); 1547 1.1 matt GE_DPRINTF(sc, (")")); 1548 1.1 matt #endif 1549 1.1 matt 1550 1.1 matt /* 1551 1.1 matt * If we are restarting, there may be packets in the pending queue 1552 1.1 matt * waiting to be enqueued. Try enqueuing packets from both priority 1553 1.1 matt * queues until the pending queue is empty or there no room for them 1554 1.1 matt * on the device. 1555 1.1 matt */ 1556 1.1 matt while (gfe_tx_enqueue(sc, txprio)) 1557 1.1 matt continue; 1558 1.1 matt 1559 1.1 matt GE_FUNC_EXIT(sc, ""); 1560 1.1 matt return 0; 1561 1.1 matt } 1562 1.1 matt 1563 1.1 matt void 1564 1.1 matt gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush) 1565 1.1 matt { 1566 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; 1567 1.1 matt 1568 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_cleanup"); 1569 1.1 matt if (txq == NULL) { 1570 1.1 matt GE_FUNC_EXIT(sc, ""); 1571 1.1 matt return; 1572 1.1 matt } 1573 1.1 matt 1574 1.1 matt if (!flush) { 1575 1.1 matt GE_FUNC_EXIT(sc, ""); 1576 1.1 matt return; 1577 1.1 matt } 1578 1.1 matt 1579 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) { 1580 1.15 matt gfe_dmamem_free(sc, &txq->txq_desc_mem); 1581 1.15 matt gfe_dmamem_free(sc, &txq->txq_buf_mem); 1582 1.15 matt } 1583 1.1 matt GE_FUNC_EXIT(sc, "-F"); 1584 1.1 matt } 1585 1.1 matt 1586 1.1 matt void 1587 1.1 matt gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op) 1588 1.1 matt { 1589 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_stop"); 1590 1.1 matt 1591 1.52 msaitoh GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_STDH | ETH_ESDCMR_STDL); 1592 1.1 matt 1593 1.1 matt sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask); 1594 1.1 matt sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask); 1595 1.36 kiyohara sc->sc_intrmask &= 1596 1.36 kiyohara ~(ETH_IR_TxEndHigh | 1597 1.36 kiyohara ETH_IR_TxBufferHigh | 1598 1.36 kiyohara ETH_IR_TxEndLow | 1599 1.36 kiyohara ETH_IR_TxBufferLow); 1600 1.1 matt 1601 1.1 matt gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP); 1602 1.1 matt gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP); 1603 1.1 matt 1604 1.1 matt sc->sc_ec.ec_if.if_timer = 0; 1605 1.1 matt GE_FUNC_EXIT(sc, ""); 1606 1.1 matt } 1607 1.36 kiyohara 1608 1.1 matt int 1609 1.1 matt gfe_intr(void *arg) 1610 1.1 matt { 1611 1.1 matt struct gfe_softc * const sc = arg; 1612 1.1 matt uint32_t cause; 1613 1.1 matt uint32_t intrmask = sc->sc_intrmask; 1614 1.1 matt int claim = 0; 1615 1.1 matt int cnt; 1616 1.1 matt 1617 1.1 matt GE_FUNC_ENTER(sc, "gfe_intr"); 1618 1.1 matt 1619 1.1 matt for (cnt = 0; cnt < 4; cnt++) { 1620 1.1 matt if (sc->sc_intrmask != intrmask) { 1621 1.1 matt sc->sc_intrmask = intrmask; 1622 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1623 1.1 matt } 1624 1.36 kiyohara cause = GE_READ(sc, ETH_EICR); 1625 1.1 matt cause &= sc->sc_intrmask; 1626 1.1 matt GE_DPRINTF(sc, (".%#x", cause)); 1627 1.1 matt if (cause == 0) 1628 1.1 matt break; 1629 1.1 matt 1630 1.1 matt claim = 1; 1631 1.1 matt 1632 1.36 kiyohara GE_WRITE(sc, ETH_EICR, ~cause); 1633 1.1 matt #ifndef GE_NORX 1634 1.52 msaitoh if (cause & (ETH_IR_RxBuffer | ETH_IR_RxError)) 1635 1.1 matt intrmask = gfe_rx_process(sc, cause, intrmask); 1636 1.1 matt #endif 1637 1.1 matt 1638 1.1 matt #ifndef GE_NOTX 1639 1.52 msaitoh if (cause & (ETH_IR_TxBufferHigh | ETH_IR_TxEndHigh)) 1640 1.1 matt intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask); 1641 1.52 msaitoh if (cause & (ETH_IR_TxBufferLow | ETH_IR_TxEndLow)) 1642 1.1 matt intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask); 1643 1.1 matt #endif 1644 1.1 matt if (cause & ETH_IR_MIIPhySTC) { 1645 1.1 matt sc->sc_flags |= GE_PHYSTSCHG; 1646 1.1 matt /* intrmask &= ~ETH_IR_MIIPhySTC; */ 1647 1.1 matt } 1648 1.1 matt } 1649 1.13 scw 1650 1.13 scw while (gfe_tx_enqueue(sc, GE_TXPRIO_HI)) 1651 1.13 scw continue; 1652 1.13 scw while (gfe_tx_enqueue(sc, GE_TXPRIO_LO)) 1653 1.13 scw continue; 1654 1.1 matt 1655 1.1 matt GE_FUNC_EXIT(sc, ""); 1656 1.1 matt return claim; 1657 1.1 matt } 1658 1.1 matt 1659 1.1 matt int 1660 1.1 matt gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op) 1661 1.1 matt { 1662 1.1 matt int error = 0; 1663 1.1 matt GE_FUNC_ENTER(sc, "gfe_whack"); 1664 1.1 matt 1665 1.1 matt switch (op) { 1666 1.1 matt case GE_WHACK_RESTART: 1667 1.1 matt #ifndef GE_NOTX 1668 1.1 matt gfe_tx_stop(sc, op); 1669 1.1 matt #endif 1670 1.1 matt /* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */ 1671 1.1 matt /* FALLTHROUGH */ 1672 1.1 matt case GE_WHACK_START: 1673 1.1 matt #ifndef GE_NOHASH 1674 1.1 matt if (error == 0 && sc->sc_hashtable == NULL) { 1675 1.1 matt error = gfe_hash_alloc(sc); 1676 1.1 matt if (error) 1677 1.1 matt break; 1678 1.1 matt } 1679 1.1 matt if (op != GE_WHACK_RESTART) 1680 1.1 matt gfe_hash_fill(sc); 1681 1.1 matt #endif 1682 1.1 matt #ifndef GE_NORX 1683 1.1 matt if (op != GE_WHACK_RESTART) { 1684 1.1 matt error = gfe_rx_prime(sc); 1685 1.1 matt if (error) 1686 1.1 matt break; 1687 1.1 matt } 1688 1.1 matt #endif 1689 1.1 matt #ifndef GE_NOTX 1690 1.1 matt error = gfe_tx_start(sc, GE_TXPRIO_HI); 1691 1.1 matt if (error) 1692 1.1 matt break; 1693 1.1 matt #endif 1694 1.1 matt sc->sc_ec.ec_if.if_flags |= IFF_RUNNING; 1695 1.36 kiyohara GE_WRITE(sc, ETH_EPCR, sc->sc_pcr | ETH_EPCR_EN); 1696 1.36 kiyohara GE_WRITE(sc, ETH_EPCXR, sc->sc_pcxr); 1697 1.36 kiyohara GE_WRITE(sc, ETH_EICR, 0); 1698 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1699 1.1 matt #ifndef GE_NOHASH 1700 1.36 kiyohara GE_WRITE(sc, ETH_EHTPR, 1701 1.36 kiyohara sc->sc_hash_mem.gdm_map->dm_segs->ds_addr); 1702 1.1 matt #endif 1703 1.1 matt #ifndef GE_NORX 1704 1.36 kiyohara GE_WRITE(sc, ETH_ESDCMR, ETH_ESDCMR_ERD); 1705 1.1 matt sc->sc_flags |= GE_RXACTIVE; 1706 1.1 matt #endif 1707 1.1 matt /* FALLTHROUGH */ 1708 1.1 matt case GE_WHACK_CHANGE: 1709 1.1 matt GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)", 1710 1.36 kiyohara GE_READ(sc, ETH_EPCR), GE_READ(sc, ETH_EIMR))); 1711 1.36 kiyohara GE_WRITE(sc, ETH_EPCR, sc->sc_pcr | ETH_EPCR_EN); 1712 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, sc->sc_intrmask); 1713 1.1 matt gfe_ifstart(&sc->sc_ec.ec_if); 1714 1.2 matt GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)", 1715 1.36 kiyohara GE_READ(sc, ETH_ECTDP0), GE_READ(sc, ETH_ECTDP1))); 1716 1.2 matt GE_FUNC_EXIT(sc, ""); 1717 1.1 matt return error; 1718 1.1 matt case GE_WHACK_STOP: 1719 1.1 matt break; 1720 1.1 matt } 1721 1.1 matt 1722 1.1 matt #ifdef GE_DEBUG 1723 1.1 matt if (error) 1724 1.1 matt GE_DPRINTF(sc, (" failed: %d\n", error)); 1725 1.1 matt #endif 1726 1.36 kiyohara GE_WRITE(sc, ETH_EPCR, sc->sc_pcr); 1727 1.36 kiyohara GE_WRITE(sc, ETH_EIMR, 0); 1728 1.1 matt sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; 1729 1.1 matt #ifndef GE_NOTX 1730 1.1 matt gfe_tx_stop(sc, GE_WHACK_STOP); 1731 1.1 matt #endif 1732 1.1 matt #ifndef GE_NORX 1733 1.1 matt gfe_rx_stop(sc, GE_WHACK_STOP); 1734 1.1 matt #endif 1735 1.1 matt #ifndef GE_NOHASH 1736 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) { 1737 1.15 matt gfe_dmamem_free(sc, &sc->sc_hash_mem); 1738 1.15 matt sc->sc_hashtable = NULL; 1739 1.15 matt } 1740 1.1 matt #endif 1741 1.1 matt 1742 1.1 matt GE_FUNC_EXIT(sc, ""); 1743 1.1 matt return error; 1744 1.1 matt } 1745 1.36 kiyohara 1746 1.1 matt int 1747 1.1 matt gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN]) 1748 1.1 matt { 1749 1.1 matt uint32_t w0, add0, add1; 1750 1.1 matt uint32_t result; 1751 1.1 matt 1752 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_compute"); 1753 1.53 msaitoh add0 = ((uint32_t) eaddr[5] << 0) | 1754 1.53 msaitoh ((uint32_t) eaddr[4] << 8) | 1755 1.1 matt ((uint32_t) eaddr[3] << 16); 1756 1.1 matt 1757 1.1 matt add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4); 1758 1.1 matt add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2); 1759 1.1 matt add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1); 1760 1.1 matt 1761 1.53 msaitoh add1 = ((uint32_t) eaddr[2] << 0) | 1762 1.53 msaitoh ((uint32_t) eaddr[1] << 8) | 1763 1.1 matt ((uint32_t) eaddr[0] << 16); 1764 1.1 matt 1765 1.1 matt add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4); 1766 1.1 matt add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2); 1767 1.1 matt add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1); 1768 1.1 matt 1769 1.1 matt GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr))); 1770 1.1 matt /* 1771 1.1 matt * hashResult is the 15 bits Hash entry address. 1772 1.1 matt * ethernetADD is a 48 bit number, which is derived from the Ethernet 1773 1.1 matt * MAC address, by nibble swapping in every byte (i.e MAC address 1774 1.1 matt * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb). 1775 1.1 matt */ 1776 1.1 matt 1777 1.1 matt if ((sc->sc_pcr & ETH_EPCR_HM) == 0) { 1778 1.1 matt /* 1779 1.1 matt * hashResult[14:0] = hashFunc0(ethernetADD[47:0]) 1780 1.1 matt * 1781 1.1 matt * hashFunc0 calculates the hashResult in the following manner: 1782 1.1 matt * hashResult[ 8:0] = ethernetADD[14:8,1,0] 1783 1.1 matt * XOR ethernetADD[23:15] XOR ethernetADD[32:24] 1784 1.1 matt */ 1785 1.1 matt result = (add0 & 3) | ((add0 >> 6) & ~3); 1786 1.1 matt result ^= (add0 >> 15) ^ (add1 >> 0); 1787 1.1 matt result &= 0x1ff; 1788 1.1 matt /* 1789 1.1 matt * hashResult[14:9] = ethernetADD[7:2] 1790 1.1 matt */ 1791 1.1 matt result |= (add0 & ~3) << 7; /* excess bits will be masked */ 1792 1.1 matt GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff)); 1793 1.1 matt } else { 1794 1.1 matt #define TRIBITFLIP 073516240 /* yes its in octal */ 1795 1.1 matt /* 1796 1.1 matt * hashResult[14:0] = hashFunc1(ethernetADD[47:0]) 1797 1.1 matt * 1798 1.1 matt * hashFunc1 calculates the hashResult in the following manner: 1799 1.1 matt * hashResult[08:00] = ethernetADD[06:14] 1800 1.1 matt * XOR ethernetADD[15:23] XOR ethernetADD[24:32] 1801 1.1 matt */ 1802 1.1 matt w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff; 1803 1.1 matt /* 1804 1.1 matt * Now bitswap those 9 bits 1805 1.1 matt */ 1806 1.1 matt result = 0; 1807 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6; 1808 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3; 1809 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0; 1810 1.1 matt 1811 1.1 matt /* 1812 1.1 matt * hashResult[14:09] = ethernetADD[00:05] 1813 1.1 matt */ 1814 1.1 matt result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12; 1815 1.1 matt result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9; 1816 1.1 matt GE_DPRINTF(sc, ("1(%#x)", result)); 1817 1.1 matt } 1818 1.1 matt GE_FUNC_EXIT(sc, ""); 1819 1.1 matt return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff); 1820 1.1 matt } 1821 1.1 matt 1822 1.1 matt int 1823 1.1 matt gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op, 1824 1.6 matt enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN]) 1825 1.1 matt { 1826 1.1 matt uint64_t he; 1827 1.1 matt uint64_t *maybe_he_p = NULL; 1828 1.1 matt int limit; 1829 1.1 matt int hash; 1830 1.1 matt int maybe_hash = 0; 1831 1.1 matt 1832 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_entry_op"); 1833 1.1 matt 1834 1.1 matt hash = gfe_hash_compute(sc, eaddr); 1835 1.1 matt 1836 1.1 matt if (sc->sc_hashtable == NULL) { 1837 1.36 kiyohara panic("%s:%d: hashtable == NULL!", device_xname(sc->sc_dev), 1838 1.1 matt __LINE__); 1839 1.1 matt } 1840 1.1 matt 1841 1.1 matt /* 1842 1.1 matt * Assume we are going to insert so create the hash entry we 1843 1.1 matt * are going to insert. We also use it to match entries we 1844 1.1 matt * will be removing. 1845 1.1 matt */ 1846 1.1 matt he = ((uint64_t) eaddr[5] << 43) | 1847 1.1 matt ((uint64_t) eaddr[4] << 35) | 1848 1.1 matt ((uint64_t) eaddr[3] << 27) | 1849 1.1 matt ((uint64_t) eaddr[2] << 19) | 1850 1.1 matt ((uint64_t) eaddr[1] << 11) | 1851 1.1 matt ((uint64_t) eaddr[0] << 3) | 1852 1.1 matt HSH_PRIO_INS(prio) | HSH_V | HSH_R; 1853 1.1 matt 1854 1.1 matt /* 1855 1.1 matt * The GT will search upto 12 entries for a hit, so we must mimic that. 1856 1.1 matt */ 1857 1.1 matt hash &= sc->sc_hashmask / sizeof(he); 1858 1.1 matt for (limit = HSH_LIMIT; limit > 0 ; --limit) { 1859 1.1 matt /* 1860 1.1 matt * Does the GT wrap at the end, stop at the, or overrun the 1861 1.16 perry * end? Assume it wraps for now. Stash a copy of the 1862 1.1 matt * current hash entry. 1863 1.1 matt */ 1864 1.1 matt uint64_t *he_p = &sc->sc_hashtable[hash]; 1865 1.1 matt uint64_t thishe = *he_p; 1866 1.1 matt 1867 1.1 matt /* 1868 1.1 matt * If the hash entry isn't valid, that break the chain. And 1869 1.1 matt * this entry a good candidate for reuse. 1870 1.1 matt */ 1871 1.1 matt if ((thishe & HSH_V) == 0) { 1872 1.1 matt maybe_he_p = he_p; 1873 1.1 matt break; 1874 1.1 matt } 1875 1.1 matt 1876 1.1 matt /* 1877 1.1 matt * If the hash entry has the same address we are looking for 1878 1.1 matt * then ... if we are removing and the skip bit is set, its 1879 1.1 matt * already been removed. if are adding and the skip bit is 1880 1.1 matt * clear, then its already added. In either return EBUSY 1881 1.1 matt * indicating the op has already been done. Otherwise flip 1882 1.1 matt * the skip bit and return 0. 1883 1.1 matt */ 1884 1.1 matt if (((he ^ thishe) & HSH_ADDR_MASK) == 0) { 1885 1.1 matt if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) || 1886 1.1 matt ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0)) 1887 1.1 matt return EBUSY; 1888 1.1 matt *he_p = thishe ^ HSH_S; 1889 1.1 matt bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1890 1.2 matt hash * sizeof(he), sizeof(he), 1891 1.2 matt BUS_DMASYNC_PREWRITE); 1892 1.1 matt GE_FUNC_EXIT(sc, "^"); 1893 1.1 matt return 0; 1894 1.1 matt } 1895 1.1 matt 1896 1.1 matt /* 1897 1.1 matt * If we haven't found a slot for the entry and this entry 1898 1.1 matt * is currently being skipped, return this entry. 1899 1.1 matt */ 1900 1.1 matt if (maybe_he_p == NULL && (thishe & HSH_S)) { 1901 1.1 matt maybe_he_p = he_p; 1902 1.1 matt maybe_hash = hash; 1903 1.1 matt } 1904 1.16 perry 1905 1.1 matt hash = (hash + 1) & (sc->sc_hashmask / sizeof(he)); 1906 1.1 matt } 1907 1.1 matt 1908 1.1 matt /* 1909 1.1 matt * If we got here, then there was no entry to remove. 1910 1.1 matt */ 1911 1.1 matt if (op == GE_HASH_REMOVE) { 1912 1.1 matt GE_FUNC_EXIT(sc, "?"); 1913 1.1 matt return ENOENT; 1914 1.1 matt } 1915 1.1 matt 1916 1.1 matt /* 1917 1.1 matt * If we couldn't find a slot, return an error. 1918 1.1 matt */ 1919 1.1 matt if (maybe_he_p == NULL) { 1920 1.1 matt GE_FUNC_EXIT(sc, "!"); 1921 1.1 matt return ENOSPC; 1922 1.1 matt } 1923 1.1 matt 1924 1.1 matt /* Update the entry. 1925 1.1 matt */ 1926 1.1 matt *maybe_he_p = he; 1927 1.1 matt bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 1928 1.2 matt maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE); 1929 1.1 matt GE_FUNC_EXIT(sc, "+"); 1930 1.1 matt return 0; 1931 1.1 matt } 1932 1.1 matt 1933 1.1 matt int 1934 1.36 kiyohara gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, 1935 1.36 kiyohara u_long cmd) 1936 1.1 matt { 1937 1.36 kiyohara struct gfe_softc *sc = ec->ec_if.if_softc; 1938 1.1 matt int error; 1939 1.1 matt enum gfe_hash_op op; 1940 1.1 matt enum gfe_rxprio prio; 1941 1.1 matt 1942 1.1 matt GE_FUNC_ENTER(sc, "hash_multichg"); 1943 1.1 matt /* 1944 1.1 matt * Is this a wildcard entry? If so and its being removed, recompute. 1945 1.1 matt */ 1946 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1947 1.1 matt if (cmd == SIOCDELMULTI) { 1948 1.1 matt GE_FUNC_EXIT(sc, ""); 1949 1.1 matt return ENETRESET; 1950 1.1 matt } 1951 1.1 matt 1952 1.1 matt /* 1953 1.1 matt * Switch in 1954 1.1 matt */ 1955 1.1 matt sc->sc_flags |= GE_ALLMULTI; 1956 1.1 matt if ((sc->sc_pcr & ETH_EPCR_PM) == 0) { 1957 1.1 matt sc->sc_pcr |= ETH_EPCR_PM; 1958 1.36 kiyohara GE_WRITE(sc, ETH_EPCR, sc->sc_pcr); 1959 1.1 matt GE_FUNC_EXIT(sc, ""); 1960 1.1 matt return 0; 1961 1.1 matt } 1962 1.1 matt GE_FUNC_EXIT(sc, ""); 1963 1.1 matt return ENETRESET; 1964 1.1 matt } 1965 1.1 matt 1966 1.1 matt prio = GE_RXPRIO_MEDLO; 1967 1.1 matt op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD); 1968 1.1 matt 1969 1.1 matt if (sc->sc_hashtable == NULL) { 1970 1.1 matt GE_FUNC_EXIT(sc, ""); 1971 1.1 matt return 0; 1972 1.1 matt } 1973 1.1 matt 1974 1.1 matt error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo); 1975 1.1 matt if (error == EBUSY) { 1976 1.36 kiyohara aprint_error_dev(sc->sc_dev, "multichg: tried to %s %s again\n", 1977 1.36 kiyohara cmd == SIOCDELMULTI ? "remove" : "add", 1978 1.36 kiyohara ether_sprintf(enm->enm_addrlo)); 1979 1.1 matt GE_FUNC_EXIT(sc, ""); 1980 1.1 matt return 0; 1981 1.1 matt } 1982 1.1 matt 1983 1.1 matt if (error == ENOENT) { 1984 1.36 kiyohara aprint_error_dev(sc->sc_dev, 1985 1.36 kiyohara "multichg: failed to remove %s: not in table\n", 1986 1.36 kiyohara ether_sprintf(enm->enm_addrlo)); 1987 1.1 matt GE_FUNC_EXIT(sc, ""); 1988 1.1 matt return 0; 1989 1.1 matt } 1990 1.1 matt 1991 1.1 matt if (error == ENOSPC) { 1992 1.36 kiyohara aprint_error_dev(sc->sc_dev, "multichg:" 1993 1.36 kiyohara " failed to add %s: no space; regenerating table\n", 1994 1.36 kiyohara ether_sprintf(enm->enm_addrlo)); 1995 1.1 matt GE_FUNC_EXIT(sc, ""); 1996 1.1 matt return ENETRESET; 1997 1.1 matt } 1998 1.1 matt GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n", 1999 1.36 kiyohara device_xname(sc->sc_dev), 2000 1.36 kiyohara cmd == SIOCDELMULTI ? "remove" : "add", 2001 1.36 kiyohara ether_sprintf(enm->enm_addrlo))); 2002 1.1 matt GE_FUNC_EXIT(sc, ""); 2003 1.1 matt return 0; 2004 1.1 matt } 2005 1.1 matt 2006 1.1 matt int 2007 1.1 matt gfe_hash_fill(struct gfe_softc *sc) 2008 1.1 matt { 2009 1.52 msaitoh struct ethercom *ec = &sc->sc_ec; 2010 1.1 matt struct ether_multistep step; 2011 1.1 matt struct ether_multi *enm; 2012 1.1 matt int error; 2013 1.1 matt 2014 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_fill"); 2015 1.1 matt 2016 1.1 matt error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, 2017 1.52 msaitoh CLLADDR(ec->ec_if.if_sadl)); 2018 1.43 christos if (error) { 2019 1.1 matt GE_FUNC_EXIT(sc, "!"); 2020 1.1 matt return error; 2021 1.43 christos } 2022 1.1 matt 2023 1.1 matt sc->sc_flags &= ~GE_ALLMULTI; 2024 1.52 msaitoh if ((ec->ec_if.if_flags & IFF_PROMISC) == 0) 2025 1.1 matt sc->sc_pcr &= ~ETH_EPCR_PM; 2026 1.54 msaitoh ETHER_LOCK(ec); 2027 1.52 msaitoh ETHER_FIRST_MULTI(step, ec, enm); 2028 1.1 matt while (enm != NULL) { 2029 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2030 1.1 matt sc->sc_flags |= GE_ALLMULTI; 2031 1.1 matt sc->sc_pcr |= ETH_EPCR_PM; 2032 1.1 matt } else { 2033 1.1 matt error = gfe_hash_entry_op(sc, GE_HASH_ADD, 2034 1.1 matt GE_RXPRIO_MEDLO, enm->enm_addrlo); 2035 1.1 matt if (error == ENOSPC) 2036 1.1 matt break; 2037 1.1 matt } 2038 1.1 matt ETHER_NEXT_MULTI(step, enm); 2039 1.1 matt } 2040 1.54 msaitoh ETHER_UNLOCK(ec); 2041 1.1 matt 2042 1.1 matt GE_FUNC_EXIT(sc, ""); 2043 1.1 matt return error; 2044 1.1 matt } 2045 1.1 matt 2046 1.1 matt int 2047 1.1 matt gfe_hash_alloc(struct gfe_softc *sc) 2048 1.1 matt { 2049 1.1 matt int error; 2050 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_alloc"); 2051 1.1 matt sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; 2052 1.2 matt error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, 2053 1.2 matt BUS_DMA_NOCACHE); 2054 1.1 matt if (error) { 2055 1.36 kiyohara aprint_error_dev(sc->sc_dev, 2056 1.36 kiyohara "failed to allocate %d bytes for hash table: %d\n", 2057 1.36 kiyohara sc->sc_hashmask + 1, error); 2058 1.1 matt GE_FUNC_EXIT(sc, ""); 2059 1.1 matt return error; 2060 1.1 matt } 2061 1.1 matt sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; 2062 1.1 matt memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); 2063 1.1 matt bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, 2064 1.2 matt 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); 2065 1.1 matt GE_FUNC_EXIT(sc, ""); 2066 1.1 matt return 0; 2067 1.1 matt } 2068