1 /* $NetBSD: if_mvxpe.c,v 1.44 2025/10/04 04:44:20 thorpej Exp $ */ 2 /* 3 * Copyright (c) 2015 Internet Initiative Japan Inc. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 24 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 #include <sys/cdefs.h> 28 __KERNEL_RCSID(0, "$NetBSD: if_mvxpe.c,v 1.44 2025/10/04 04:44:20 thorpej Exp $"); 29 30 #include "opt_multiprocessor.h" 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 #include <sys/callout.h> 35 #include <sys/device.h> 36 #include <sys/endian.h> 37 #include <sys/errno.h> 38 #include <sys/evcnt.h> 39 #include <sys/kernel.h> 40 #include <sys/kmem.h> 41 #include <sys/mutex.h> 42 #include <sys/sockio.h> 43 #include <sys/sysctl.h> 44 #include <sys/syslog.h> 45 #include <sys/rndsource.h> 46 47 #include <net/if.h> 48 #include <net/if_ether.h> 49 #include <net/if_media.h> 50 #include <net/bpf.h> 51 52 #include <netinet/in.h> 53 #include <netinet/in_systm.h> 54 #include <netinet/ip.h> 55 56 #include <dev/mii/mii.h> 57 #include <dev/mii/miivar.h> 58 59 #include <dev/marvell/marvellreg.h> 60 #include <dev/marvell/marvellvar.h> 61 #include <dev/marvell/mvxpbmvar.h> 62 #include <dev/marvell/if_mvxpereg.h> 63 #include <dev/marvell/if_mvxpevar.h> 64 65 #include "locators.h" 66 67 #if BYTE_ORDER == BIG_ENDIAN 68 #error "BIG ENDIAN not supported" 69 #endif 70 71 #ifdef MVXPE_DEBUG 72 #define STATIC /* nothing */ 73 #else 74 #define STATIC static 75 #endif 76 77 /* autoconf(9) */ 78 STATIC int mvxpe_match(device_t, struct cfdata *, void *); 79 STATIC void mvxpe_attach(device_t, device_t, void *); 80 STATIC int mvxpe_evcnt_attach(struct mvxpe_softc *); 81 CFATTACH_DECL_NEW(mvxpe_mbus, sizeof(struct mvxpe_softc), 82 mvxpe_match, mvxpe_attach, NULL, NULL); 83 STATIC void mvxpe_sc_lock(struct mvxpe_softc *); 84 STATIC void mvxpe_sc_unlock(struct mvxpe_softc *); 85 86 /* MII */ 87 STATIC int mvxpe_miibus_readreg(device_t, int, int, uint16_t *); 88 STATIC int mvxpe_miibus_writereg(device_t, int, int, uint16_t); 89 STATIC void mvxpe_miibus_statchg(struct ifnet *); 90 91 /* Address Decoding Window */ 92 STATIC void mvxpe_wininit(struct mvxpe_softc *, enum marvell_tags *); 93 94 /* Device Register Initialization */ 95 STATIC int mvxpe_initreg(struct ifnet *); 96 97 /* Descriptor Ring Control for each of queues */ 98 STATIC void *mvxpe_dma_memalloc(struct mvxpe_softc 
*, bus_dmamap_t *, size_t); 99 STATIC int mvxpe_ring_alloc_queue(struct mvxpe_softc *, int); 100 STATIC void mvxpe_ring_dealloc_queue(struct mvxpe_softc *, int); 101 STATIC void mvxpe_ring_init_queue(struct mvxpe_softc *, int); 102 STATIC void mvxpe_ring_flush_queue(struct mvxpe_softc *, int); 103 STATIC void mvxpe_ring_sync_rx(struct mvxpe_softc *, int, int, int, int); 104 STATIC void mvxpe_ring_sync_tx(struct mvxpe_softc *, int, int, int, int); 105 106 /* Rx/Tx Queue Control */ 107 STATIC int mvxpe_rx_queue_init(struct ifnet *, int); 108 STATIC int mvxpe_tx_queue_init(struct ifnet *, int); 109 STATIC int mvxpe_rx_queue_enable(struct ifnet *, int); 110 STATIC int mvxpe_tx_queue_enable(struct ifnet *, int); 111 STATIC void mvxpe_rx_lockq(struct mvxpe_softc *, int); 112 STATIC void mvxpe_rx_unlockq(struct mvxpe_softc *, int); 113 STATIC void mvxpe_tx_lockq(struct mvxpe_softc *, int); 114 STATIC void mvxpe_tx_unlockq(struct mvxpe_softc *, int); 115 116 /* Interrupt Handlers */ 117 STATIC void mvxpe_disable_intr(struct mvxpe_softc *); 118 STATIC void mvxpe_enable_intr(struct mvxpe_softc *); 119 STATIC int mvxpe_rxtxth_intr(void *); 120 STATIC int mvxpe_misc_intr(void *); 121 STATIC int mvxpe_rxtx_intr(void *); 122 STATIC void mvxpe_tick(void *); 123 124 /* struct ifnet and mii callbacks*/ 125 STATIC void mvxpe_start(struct ifnet *); 126 STATIC int mvxpe_ioctl(struct ifnet *, u_long, void *); 127 STATIC int mvxpe_init(struct ifnet *); 128 STATIC void mvxpe_stop(struct ifnet *, int); 129 STATIC void mvxpe_watchdog(struct ifnet *); 130 STATIC int mvxpe_ifflags_cb(struct ethercom *); 131 STATIC int mvxpe_mediachange(struct ifnet *); 132 STATIC void mvxpe_mediastatus(struct ifnet *, struct ifmediareq *); 133 134 /* Link State Notify */ 135 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc); 136 STATIC void mvxpe_linkup(struct mvxpe_softc *); 137 STATIC void mvxpe_linkdown(struct mvxpe_softc *); 138 STATIC void mvxpe_linkreset(struct mvxpe_softc *); 139 140 /* Tx Subroutines */ 141 STATIC int mvxpe_tx_queue_select(struct mvxpe_softc *, struct mbuf *); 142 STATIC int mvxpe_tx_queue(struct mvxpe_softc *, struct mbuf *, int); 143 STATIC void mvxpe_tx_set_csumflag(struct ifnet *, 144 struct mvxpe_tx_desc *, struct mbuf *); 145 STATIC void mvxpe_tx_complete(struct mvxpe_softc *, uint32_t); 146 STATIC void mvxpe_tx_queue_complete(struct mvxpe_softc *, int); 147 148 /* Rx Subroutines */ 149 STATIC void mvxpe_rx(struct mvxpe_softc *, uint32_t); 150 STATIC void mvxpe_rx_queue(struct mvxpe_softc *, int, int); 151 STATIC int mvxpe_rx_queue_select(struct mvxpe_softc *, uint32_t, int *); 152 STATIC void mvxpe_rx_refill(struct mvxpe_softc *, uint32_t); 153 STATIC void mvxpe_rx_queue_refill(struct mvxpe_softc *, int); 154 STATIC int mvxpe_rx_queue_add(struct mvxpe_softc *, int); 155 STATIC void mvxpe_rx_set_csumflag(struct ifnet *, 156 struct mvxpe_rx_desc *, struct mbuf *); 157 158 /* MAC address filter */ 159 STATIC uint8_t mvxpe_crc8(const uint8_t *, size_t); 160 STATIC void mvxpe_filter_setup(struct mvxpe_softc *); 161 162 /* sysctl(9) */ 163 STATIC int sysctl_read_mib(SYSCTLFN_PROTO); 164 STATIC int sysctl_clear_mib(SYSCTLFN_PROTO); 165 STATIC int sysctl_set_queue_length(SYSCTLFN_PROTO); 166 STATIC int sysctl_set_queue_rxthtime(SYSCTLFN_PROTO); 167 STATIC void sysctl_mvxpe_init(struct mvxpe_softc *); 168 169 /* MIB */ 170 STATIC void mvxpe_clear_mib(struct mvxpe_softc *); 171 STATIC void mvxpe_update_mib(struct mvxpe_softc *); 172 173 /* for Debug */ 174 STATIC void mvxpe_dump_txdesc(struct mvxpe_tx_desc 
*, int) __attribute__((__unused__)); 175 STATIC void mvxpe_dump_rxdesc(struct mvxpe_rx_desc *, int) __attribute__((__unused__)); 176 177 STATIC int mvxpe_root_num; 178 STATIC kmutex_t mii_mutex; 179 STATIC int mii_init = 0; 180 #ifdef MVXPE_DEBUG 181 STATIC int mvxpe_debug = MVXPE_DEBUG; 182 #endif 183 184 /* 185 * List of MIB register and names 186 */ 187 STATIC struct mvxpe_mib_def { 188 uint32_t regnum; 189 int reg64; 190 const char *sysctl_name; 191 const char *desc; 192 int ext; 193 #define MVXPE_MIBEXT_IF_OERRORS 1 194 #define MVXPE_MIBEXT_IF_IERRORS 2 195 #define MVXPE_MIBEXT_IF_COLLISIONS 3 196 } mvxpe_mib_list[] = { 197 {MVXPE_MIB_RX_GOOD_OCT, 1, "rx_good_oct", 198 "Good Octets Rx", 0}, 199 {MVXPE_MIB_RX_BAD_OCT, 0, "rx_bad_oct", 200 "Bad Octets Rx", 0}, 201 {MVXPE_MIB_TX_MAC_TRNS_ERR, 0, "tx_mac_err", 202 "MAC Transmit Error", MVXPE_MIBEXT_IF_OERRORS}, 203 {MVXPE_MIB_RX_GOOD_FRAME, 0, "rx_good_frame", 204 "Good Frames Rx", 0}, 205 {MVXPE_MIB_RX_BAD_FRAME, 0, "rx_bad_frame", 206 "Bad Frames Rx", 0}, 207 {MVXPE_MIB_RX_BCAST_FRAME, 0, "rx_bcast_frame", 208 "Broadcast Frames Rx", 0}, 209 {MVXPE_MIB_RX_MCAST_FRAME, 0, "rx_mcast_frame", 210 "Multicast Frames Rx", 0}, 211 {MVXPE_MIB_RX_FRAME64_OCT, 0, "rx_frame_1_64", 212 "Frame Size 1 - 64", 0}, 213 {MVXPE_MIB_RX_FRAME127_OCT, 0, "rx_frame_65_127", 214 "Frame Size 65 - 127", 0}, 215 {MVXPE_MIB_RX_FRAME255_OCT, 0, "rx_frame_128_255", 216 "Frame Size 128 - 255", 0}, 217 {MVXPE_MIB_RX_FRAME511_OCT, 0, "rx_frame_256_511", 218 "Frame Size 256 - 511"}, 219 {MVXPE_MIB_RX_FRAME1023_OCT, 0, "rx_frame_512_1023", 220 "Frame Size 512 - 1023", 0}, 221 {MVXPE_MIB_RX_FRAMEMAX_OCT, 0, "rx_frame_1024_max", 222 "Frame Size 1024 - Max", 0}, 223 {MVXPE_MIB_TX_GOOD_OCT, 1, "tx_good_oct", 224 "Good Octets Tx", 0}, 225 {MVXPE_MIB_TX_GOOD_FRAME, 0, "tx_good_frame", 226 "Good Frames Tx", 0}, 227 {MVXPE_MIB_TX_EXCES_COL, 0, "tx_exces_collision", 228 "Excessive Collision", MVXPE_MIBEXT_IF_OERRORS}, 229 {MVXPE_MIB_TX_MCAST_FRAME, 0, "tx_mcast_frame", 230 "Multicast Frames Tx"}, 231 {MVXPE_MIB_TX_BCAST_FRAME, 0, "tx_bcast_frame", 232 "Broadcast Frames Tx"}, 233 {MVXPE_MIB_TX_MAC_CTL_ERR, 0, "tx_mac_err", 234 "Unknown MAC Control", 0}, 235 {MVXPE_MIB_FC_SENT, 0, "fc_tx", 236 "Flow Control Tx", 0}, 237 {MVXPE_MIB_FC_GOOD, 0, "fc_rx_good", 238 "Good Flow Control Rx", 0}, 239 {MVXPE_MIB_FC_BAD, 0, "fc_rx_bad", 240 "Bad Flow Control Rx", 0}, 241 {MVXPE_MIB_PKT_UNDERSIZE, 0, "pkt_undersize", 242 "Undersized Packets Rx", MVXPE_MIBEXT_IF_IERRORS}, 243 {MVXPE_MIB_PKT_FRAGMENT, 0, "pkt_fragment", 244 "Fragmented Packets Rx", MVXPE_MIBEXT_IF_IERRORS}, 245 {MVXPE_MIB_PKT_OVERSIZE, 0, "pkt_oversize", 246 "Oversized Packets Rx", MVXPE_MIBEXT_IF_IERRORS}, 247 {MVXPE_MIB_PKT_JABBER, 0, "pkt_jabber", 248 "Jabber Packets Rx", MVXPE_MIBEXT_IF_IERRORS}, 249 {MVXPE_MIB_MAC_RX_ERR, 0, "mac_rx_err", 250 "MAC Rx Errors", MVXPE_MIBEXT_IF_IERRORS}, 251 {MVXPE_MIB_MAC_CRC_ERR, 0, "mac_crc_err", 252 "MAC CRC Errors", MVXPE_MIBEXT_IF_IERRORS}, 253 {MVXPE_MIB_MAC_COL, 0, "mac_collision", 254 "MAC Collision", MVXPE_MIBEXT_IF_COLLISIONS}, 255 {MVXPE_MIB_MAC_LATE_COL, 0, "mac_late_collision", 256 "MAC Late Collision", MVXPE_MIBEXT_IF_OERRORS}, 257 }; 258 259 /* 260 * autoconf(9) 261 */ 262 /* ARGSUSED */ 263 STATIC int 264 mvxpe_match(device_t parent, cfdata_t match, void *aux) 265 { 266 struct marvell_attach_args *mva = aux; 267 bus_size_t pv_off; 268 uint32_t pv; 269 270 if (strcmp(mva->mva_name, match->cf_name) != 0) 271 return 0; 272 if (mva->mva_offset == MVA_OFFSET_DEFAULT) 273 return 0; 
274 275 /* check port version */ 276 pv_off = mva->mva_offset + MVXPE_PV; 277 pv = bus_space_read_4(mva->mva_iot, mva->mva_ioh, pv_off); 278 if (MVXPE_PV_GET_VERSION(pv) < 0x10) 279 return 0; /* old version is not supported */ 280 281 return 1; 282 } 283 284 /* ARGSUSED */ 285 STATIC void 286 mvxpe_attach(device_t parent, device_t self, void *aux) 287 { 288 struct mvxpe_softc *sc = device_private(self); 289 struct mii_softc *child; 290 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 291 struct mii_data * const mii = &sc->sc_mii; 292 struct marvell_attach_args *mva = aux; 293 uint32_t phyaddr, maddrh, maddrl; 294 uint8_t enaddr[ETHER_ADDR_LEN]; 295 int q; 296 297 aprint_naive("\n"); 298 aprint_normal(": Marvell ARMADA GbE Controller\n"); 299 memset(sc, 0, sizeof(*sc)); 300 sc->sc_dev = self; 301 sc->sc_port = mva->mva_unit; 302 sc->sc_iot = mva->mva_iot; 303 sc->sc_dmat = mva->mva_dmat; 304 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); 305 callout_init(&sc->sc_tick_ch, 0); 306 callout_setfunc(&sc->sc_tick_ch, mvxpe_tick, sc); 307 308 /* 309 * BUS space 310 */ 311 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 312 mva->mva_offset, mva->mva_size, &sc->sc_ioh)) { 313 aprint_error_dev(self, "Cannot map registers\n"); 314 goto fail; 315 } 316 if (bus_space_subregion(mva->mva_iot, mva->mva_ioh, 317 mva->mva_offset + MVXPE_PORTMIB_BASE, MVXPE_PORTMIB_SIZE, 318 &sc->sc_mibh)) { 319 aprint_error_dev(self, 320 "Cannot map destination address filter registers\n"); 321 goto fail; 322 } 323 sc->sc_version = MVXPE_READ(sc, MVXPE_PV); 324 aprint_normal_dev(self, "Port Version %#x\n", sc->sc_version); 325 326 /* 327 * Buffer Manager(BM) subsystem. 328 */ 329 sc->sc_bm = mvxpbm_device(mva); 330 if (sc->sc_bm == NULL) { 331 aprint_error_dev(self, "no Buffer Manager.\n"); 332 goto fail; 333 } 334 aprint_normal_dev(self, 335 "Using Buffer Manager: %s\n", mvxpbm_xname(sc->sc_bm)); 336 aprint_normal_dev(sc->sc_dev, 337 "%zu kbytes managed buffer, %zu bytes * %u entries allocated.\n", 338 mvxpbm_buf_size(sc->sc_bm) / 1024, 339 mvxpbm_chunk_size(sc->sc_bm), mvxpbm_chunk_count(sc->sc_bm)); 340 341 /* 342 * make sure DMA engines are in reset state 343 */ 344 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001); 345 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001); 346 347 /* 348 * Address decoding window 349 */ 350 mvxpe_wininit(sc, mva->mva_tags); 351 352 /* 353 * MAC address 354 */ 355 if (ether_getaddr(self, enaddr)) { 356 maddrh = enaddr[0] << 24; 357 maddrh |= enaddr[1] << 16; 358 maddrh |= enaddr[2] << 8; 359 maddrh |= enaddr[3]; 360 maddrl = enaddr[4] << 8; 361 maddrl |= enaddr[5]; 362 MVXPE_WRITE(sc, MVXPE_MACAH, maddrh); 363 MVXPE_WRITE(sc, MVXPE_MACAL, maddrl); 364 } 365 else { 366 /* 367 * even if enaddr is not found in device properties, 368 * the port may be initialized by IPL program such as U-BOOT. 369 */ 370 maddrh = MVXPE_READ(sc, MVXPE_MACAH); 371 maddrl = MVXPE_READ(sc, MVXPE_MACAL); 372 if ((maddrh | maddrl) == 0) { 373 aprint_error_dev(self, "No Ethernet address\n"); 374 return; 375 } 376 } 377 sc->sc_enaddr[0] = maddrh >> 24; 378 sc->sc_enaddr[1] = maddrh >> 16; 379 sc->sc_enaddr[2] = maddrh >> 8; 380 sc->sc_enaddr[3] = maddrh >> 0; 381 sc->sc_enaddr[4] = maddrl >> 8; 382 sc->sc_enaddr[5] = maddrl >> 0; 383 aprint_normal_dev(self, "Ethernet address %s\n", 384 ether_sprintf(sc->sc_enaddr)); 385 386 /* 387 * Register interrupt handlers 388 * XXX: handle Ethernet unit intr. and Error intr. 
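	 * Note: only the RXTX_TH summary interrupt is established below;
	 * the port MISC and RxTx causes are serviced from that handler
	 * through their summary bits (see mvxpe_rxtxth_intr()).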
389 */ 390 mvxpe_disable_intr(sc); 391 marvell_intr_establish(mva->mva_irq, IPL_NET, mvxpe_rxtxth_intr, sc); 392 393 /* 394 * MIB buffer allocation 395 */ 396 sc->sc_sysctl_mib_size = 397 __arraycount(mvxpe_mib_list) * sizeof(struct mvxpe_sysctl_mib); 398 sc->sc_sysctl_mib = kmem_alloc(sc->sc_sysctl_mib_size, KM_SLEEP); 399 memset(sc->sc_sysctl_mib, 0, sc->sc_sysctl_mib_size); 400 401 /* 402 * Device DMA Buffer allocation 403 */ 404 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 405 if (mvxpe_ring_alloc_queue(sc, q) != 0) 406 goto fail; 407 mvxpe_ring_init_queue(sc, q); 408 } 409 410 /* 411 * We can support 802.1Q VLAN-sized frames and jumbo 412 * Ethernet frames. 413 */ 414 sc->sc_ethercom.ec_capabilities |= 415 ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; 416 ifp->if_softc = sc; 417 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 418 ifp->if_start = mvxpe_start; 419 ifp->if_ioctl = mvxpe_ioctl; 420 ifp->if_init = mvxpe_init; 421 ifp->if_stop = mvxpe_stop; 422 ifp->if_watchdog = mvxpe_watchdog; 423 424 /* 425 * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware. 426 */ 427 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx; 428 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx; 429 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Tx; 430 ifp->if_capabilities |= IFCAP_CSUM_TCPv4_Rx; 431 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Tx; 432 ifp->if_capabilities |= IFCAP_CSUM_UDPv4_Rx; 433 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Tx; 434 ifp->if_capabilities |= IFCAP_CSUM_TCPv6_Rx; 435 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Tx; 436 ifp->if_capabilities |= IFCAP_CSUM_UDPv6_Rx; 437 438 /* 439 * Initialize struct ifnet 440 */ 441 IFQ_SET_MAXLEN(&ifp->if_snd, uimax(MVXPE_TX_RING_CNT - 1, IFQ_MAXLEN)); 442 IFQ_SET_READY(&ifp->if_snd); 443 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname)); 444 445 /* 446 * Enable DMA engines and Initialize Device Registers. 447 */ 448 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000); 449 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000); 450 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM); 451 mvxpe_sc_lock(sc); /* XXX */ 452 mvxpe_filter_setup(sc); 453 mvxpe_sc_unlock(sc); 454 mvxpe_initreg(ifp); 455 456 /* 457 * Now MAC is working, setup MII. 458 */ 459 if (mii_init == 0) { 460 /* 461 * MII bus is shared by all MACs and all PHYs in SoC. 462 * serializing the bus access should be safe. 463 */ 464 mutex_init(&mii_mutex, MUTEX_DEFAULT, IPL_NET); 465 mii_init = 1; 466 } 467 mii->mii_ifp = ifp; 468 mii->mii_readreg = mvxpe_miibus_readreg; 469 mii->mii_writereg = mvxpe_miibus_writereg; 470 mii->mii_statchg = mvxpe_miibus_statchg; 471 472 sc->sc_ethercom.ec_mii = mii; 473 ifmedia_init(&mii->mii_media, 0, mvxpe_mediachange, mvxpe_mediastatus); 474 /* 475 * XXX: phy addressing highly depends on Board Design. 476 * we assume phyaddress == MAC unit number here, 477 * but some boards may not. 478 */ 479 mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, device_unit(sc->sc_dev), 480 0); 481 child = LIST_FIRST(&mii->mii_phys); 482 if (child == NULL) { 483 aprint_error_dev(self, "no PHY found!\n"); 484 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL); 485 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL); 486 } else { 487 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 488 phyaddr = MVXPE_PHYADDR_PHYAD(child->mii_phy); 489 MVXPE_WRITE(sc, MVXPE_PHYADDR, phyaddr); 490 DPRINTSC(sc, 1, "PHYADDR: %#x\n", MVXPE_READ(sc, MVXPE_PHYADDR)); 491 } 492 493 /* 494 * Call MI attach routines. 
495 */ 496 if_attach(ifp); 497 if_deferred_start_init(ifp, NULL); 498 499 ether_ifattach(ifp, sc->sc_enaddr); 500 ether_set_ifflags_cb(&sc->sc_ethercom, mvxpe_ifflags_cb); 501 502 sysctl_mvxpe_init(sc); 503 mvxpe_evcnt_attach(sc); 504 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 505 RND_TYPE_NET, RND_FLAG_DEFAULT); 506 507 return; 508 509 fail: 510 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) 511 mvxpe_ring_dealloc_queue(sc, q); 512 if (sc->sc_sysctl_mib) 513 kmem_free(sc->sc_sysctl_mib, sc->sc_sysctl_mib_size); 514 515 return; 516 } 517 518 STATIC int 519 mvxpe_evcnt_attach(struct mvxpe_softc *sc) 520 { 521 #ifdef MVXPE_EVENT_COUNTERS 522 int q; 523 524 /* Master Interrupt Handler */ 525 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtxth, EVCNT_TYPE_INTR, 526 NULL, device_xname(sc->sc_dev), "RxTxTH Intr."); 527 evcnt_attach_dynamic(&sc->sc_ev.ev_i_rxtx, EVCNT_TYPE_INTR, 528 NULL, device_xname(sc->sc_dev), "RxTx Intr."); 529 evcnt_attach_dynamic(&sc->sc_ev.ev_i_misc, EVCNT_TYPE_INTR, 530 NULL, device_xname(sc->sc_dev), "MISC Intr."); 531 532 /* RXTXTH Interrupt */ 533 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtxth_txerr, EVCNT_TYPE_INTR, 534 NULL, device_xname(sc->sc_dev), "RxTxTH Tx error summary"); 535 536 /* MISC Interrupt */ 537 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_phystatuschng, EVCNT_TYPE_INTR, 538 NULL, device_xname(sc->sc_dev), "MISC phy status changed"); 539 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_linkchange, EVCNT_TYPE_INTR, 540 NULL, device_xname(sc->sc_dev), "MISC link status changed"); 541 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_iae, EVCNT_TYPE_INTR, 542 NULL, device_xname(sc->sc_dev), "MISC internal address error"); 543 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxoverrun, EVCNT_TYPE_INTR, 544 NULL, device_xname(sc->sc_dev), "MISC Rx FIFO overrun"); 545 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxcrc, EVCNT_TYPE_INTR, 546 NULL, device_xname(sc->sc_dev), "MISC Rx CRC error"); 547 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_rxlargepacket, EVCNT_TYPE_INTR, 548 NULL, device_xname(sc->sc_dev), "MISC Rx too large frame"); 549 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txunderrun, EVCNT_TYPE_INTR, 550 NULL, device_xname(sc->sc_dev), "MISC Tx FIFO underrun"); 551 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_prbserr, EVCNT_TYPE_INTR, 552 NULL, device_xname(sc->sc_dev), "MISC SERDES loopback test err"); 553 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_srse, EVCNT_TYPE_INTR, 554 NULL, device_xname(sc->sc_dev), "MISC SERDES sync error"); 555 evcnt_attach_dynamic(&sc->sc_ev.ev_misc_txreq, EVCNT_TYPE_INTR, 556 NULL, device_xname(sc->sc_dev), "MISC Tx resource error"); 557 558 /* RxTx Interrupt */ 559 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rreq, EVCNT_TYPE_INTR, 560 NULL, device_xname(sc->sc_dev), "RxTx Rx resource error"); 561 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rpq, EVCNT_TYPE_INTR, 562 NULL, device_xname(sc->sc_dev), "RxTx Rx packet"); 563 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_tbrq, EVCNT_TYPE_INTR, 564 NULL, device_xname(sc->sc_dev), "RxTx Tx complete"); 565 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_rxtxth, EVCNT_TYPE_INTR, 566 NULL, device_xname(sc->sc_dev), "RxTx RxTxTH summary"); 567 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_txerr, EVCNT_TYPE_INTR, 568 NULL, device_xname(sc->sc_dev), "RxTx Tx error summary"); 569 evcnt_attach_dynamic(&sc->sc_ev.ev_rxtx_misc, EVCNT_TYPE_INTR, 570 NULL, device_xname(sc->sc_dev), "RxTx MISC summary"); 571 572 /* Link */ 573 evcnt_attach_dynamic(&sc->sc_ev.ev_link_up, EVCNT_TYPE_MISC, 574 NULL, device_xname(sc->sc_dev), "link up"); 575 
evcnt_attach_dynamic(&sc->sc_ev.ev_link_down, EVCNT_TYPE_MISC, 576 NULL, device_xname(sc->sc_dev), "link down"); 577 578 /* Rx Descriptor */ 579 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_ce, EVCNT_TYPE_MISC, 580 NULL, device_xname(sc->sc_dev), "Rx CRC error counter"); 581 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_or, EVCNT_TYPE_MISC, 582 NULL, device_xname(sc->sc_dev), "Rx FIFO overrun counter"); 583 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_mf, EVCNT_TYPE_MISC, 584 NULL, device_xname(sc->sc_dev), "Rx too large frame counter"); 585 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_re, EVCNT_TYPE_MISC, 586 NULL, device_xname(sc->sc_dev), "Rx resource error counter"); 587 evcnt_attach_dynamic(&sc->sc_ev.ev_rxd_scat, EVCNT_TYPE_MISC, 588 NULL, device_xname(sc->sc_dev), "Rx unexpected scatter bufs"); 589 590 /* Tx Descriptor */ 591 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_lc, EVCNT_TYPE_MISC, 592 NULL, device_xname(sc->sc_dev), "Tx late collision counter"); 593 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_rl, EVCNT_TYPE_MISC, 594 NULL, device_xname(sc->sc_dev), "Tx excess. collision counter"); 595 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_ur, EVCNT_TYPE_MISC, 596 NULL, device_xname(sc->sc_dev), "Tx FIFO underrun counter"); 597 evcnt_attach_dynamic(&sc->sc_ev.ev_txd_oth, EVCNT_TYPE_MISC, 598 NULL, device_xname(sc->sc_dev), "Tx unknown error counter"); 599 600 /* Status Registers */ 601 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pdfc, EVCNT_TYPE_MISC, 602 NULL, device_xname(sc->sc_dev), "Rx discard counter"); 603 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_pofc, EVCNT_TYPE_MISC, 604 NULL, device_xname(sc->sc_dev), "Rx overrun counter"); 605 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txbadfcs, EVCNT_TYPE_MISC, 606 NULL, device_xname(sc->sc_dev), "Tx bad FCS counter"); 607 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_txdropped, EVCNT_TYPE_MISC, 608 NULL, device_xname(sc->sc_dev), "Tx dropped counter"); 609 evcnt_attach_dynamic(&sc->sc_ev.ev_reg_lpic, EVCNT_TYPE_MISC, 610 NULL, device_xname(sc->sc_dev), "LP_IDLE counter"); 611 612 /* Device Driver Errors */ 613 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_wdogsoft, EVCNT_TYPE_MISC, 614 NULL, device_xname(sc->sc_dev), "watchdog timer expired"); 615 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txerr, EVCNT_TYPE_MISC, 616 NULL, device_xname(sc->sc_dev), "Tx descriptor alloc failed"); 617 #define MVXPE_QUEUE_DESC(q) "Rx success in queue " # q 618 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 619 static const char *rxq_desc[] = { 620 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 621 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 622 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 623 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 624 }; 625 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxq[q], EVCNT_TYPE_MISC, 626 NULL, device_xname(sc->sc_dev), rxq_desc[q]); 627 } 628 #undef MVXPE_QUEUE_DESC 629 #define MVXPE_QUEUE_DESC(q) "Tx success in queue " # q 630 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 631 static const char *txq_desc[] = { 632 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 633 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 634 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 635 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 636 }; 637 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txq[q], EVCNT_TYPE_MISC, 638 NULL, device_xname(sc->sc_dev), txq_desc[q]); 639 } 640 #undef MVXPE_QUEUE_DESC 641 #define MVXPE_QUEUE_DESC(q) "Rx error in queue " # q 642 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 643 static const char *rxqe_desc[] = { 644 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 645 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 646 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 647 
MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 648 }; 649 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_rxqe[q], EVCNT_TYPE_MISC, 650 NULL, device_xname(sc->sc_dev), rxqe_desc[q]); 651 } 652 #undef MVXPE_QUEUE_DESC 653 #define MVXPE_QUEUE_DESC(q) "Tx error in queue " # q 654 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 655 static const char *txqe_desc[] = { 656 MVXPE_QUEUE_DESC(0), MVXPE_QUEUE_DESC(1), 657 MVXPE_QUEUE_DESC(2), MVXPE_QUEUE_DESC(3), 658 MVXPE_QUEUE_DESC(4), MVXPE_QUEUE_DESC(5), 659 MVXPE_QUEUE_DESC(6), MVXPE_QUEUE_DESC(7), 660 }; 661 evcnt_attach_dynamic(&sc->sc_ev.ev_drv_txqe[q], EVCNT_TYPE_MISC, 662 NULL, device_xname(sc->sc_dev), txqe_desc[q]); 663 } 664 #undef MVXPE_QUEUE_DESC 665 666 #endif /* MVXPE_EVENT_COUNTERS */ 667 return 0; 668 } 669 670 STATIC void 671 mvxpe_sc_lock(struct mvxpe_softc *sc) 672 { 673 mutex_enter(&sc->sc_mtx); 674 } 675 676 STATIC void 677 mvxpe_sc_unlock(struct mvxpe_softc *sc) 678 { 679 mutex_exit(&sc->sc_mtx); 680 } 681 682 /* 683 * MII 684 */ 685 STATIC int 686 mvxpe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val) 687 { 688 struct mvxpe_softc *sc = device_private(dev); 689 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 690 uint32_t smi; 691 int i, rv = 0; 692 693 mutex_enter(&mii_mutex); 694 695 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 696 DELAY(1); 697 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 698 break; 699 } 700 if (i == MVXPE_PHY_TIMEOUT) { 701 aprint_error_ifnet(ifp, "SMI busy timeout\n"); 702 rv = ETIMEDOUT; 703 goto out; 704 } 705 706 smi = 707 MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | MVXPE_SMI_OPCODE_READ; 708 MVXPE_WRITE(sc, MVXPE_SMI, smi); 709 710 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 711 DELAY(1); 712 smi = MVXPE_READ(sc, MVXPE_SMI); 713 if (smi & MVXPE_SMI_READVALID) { 714 *val = smi & MVXPE_SMI_DATA_MASK; 715 break; 716 } 717 } 718 DPRINTDEV(dev, 9, "i=%d, timeout=%d\n", i, MVXPE_PHY_TIMEOUT); 719 if (i >= MVXPE_PHY_TIMEOUT) 720 rv = ETIMEDOUT; 721 722 out: 723 mutex_exit(&mii_mutex); 724 725 DPRINTDEV(dev, 9, "phy=%d, reg=%#x, val=%#hx\n", phy, reg, *val); 726 727 return rv; 728 } 729 730 STATIC int 731 mvxpe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val) 732 { 733 struct mvxpe_softc *sc = device_private(dev); 734 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 735 uint32_t smi; 736 int i, rv = 0; 737 738 DPRINTDEV(dev, 9, "phy=%d reg=%#x val=%#hx\n", phy, reg, val); 739 740 mutex_enter(&mii_mutex); 741 742 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 743 DELAY(1); 744 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 745 break; 746 } 747 if (i == MVXPE_PHY_TIMEOUT) { 748 aprint_error_ifnet(ifp, "SMI busy timeout\n"); 749 rv = ETIMEDOUT; 750 goto out; 751 } 752 753 smi = MVXPE_SMI_PHYAD(phy) | MVXPE_SMI_REGAD(reg) | 754 MVXPE_SMI_OPCODE_WRITE | (val & MVXPE_SMI_DATA_MASK); 755 MVXPE_WRITE(sc, MVXPE_SMI, smi); 756 757 for (i = 0; i < MVXPE_PHY_TIMEOUT; i++) { 758 DELAY(1); 759 if (!(MVXPE_READ(sc, MVXPE_SMI) & MVXPE_SMI_BUSY)) 760 break; 761 } 762 763 if (i == MVXPE_PHY_TIMEOUT) { 764 aprint_error_ifnet(ifp, "phy write timed out\n"); 765 rv = ETIMEDOUT; 766 } 767 768 out: 769 mutex_exit(&mii_mutex); 770 771 return rv; 772 } 773 774 STATIC void 775 mvxpe_miibus_statchg(struct ifnet *ifp) 776 { 777 778 /* nothing to do */ 779 } 780 781 /* 782 * Address Decoding Window 783 */ 784 STATIC void 785 mvxpe_wininit(struct mvxpe_softc *sc, enum marvell_tags *tags) 786 { 787 device_t pdev = device_parent(sc->sc_dev); 788 uint64_t base; 789 uint32_t en, ac, size; 790 int window, target, attr, rv, i; 791 792 /* First 
disable all address decode windows */ 793 en = MVXPE_BARE_EN_MASK; 794 MVXPE_WRITE(sc, MVXPE_BARE, en); 795 796 ac = 0; 797 for (window = 0, i = 0; 798 tags[i] != MARVELL_TAG_UNDEFINED && window < MVXPE_NWINDOW; i++) { 799 rv = marvell_winparams_by_tag(pdev, tags[i], 800 &target, &attr, &base, &size); 801 if (rv != 0 || size == 0) 802 continue; 803 804 if (base > 0xffffffffULL) { 805 if (window >= MVXPE_NREMAP) { 806 aprint_error_dev(sc->sc_dev, 807 "can't remap window %d\n", window); 808 continue; 809 } 810 MVXPE_WRITE(sc, MVXPE_HA(window), 811 (base >> 32) & 0xffffffff); 812 } 813 814 MVXPE_WRITE(sc, MVXPE_BASEADDR(window), 815 MVXPE_BASEADDR_TARGET(target) | 816 MVXPE_BASEADDR_ATTR(attr) | 817 MVXPE_BASEADDR_BASE(base)); 818 MVXPE_WRITE(sc, MVXPE_S(window), MVXPE_S_SIZE(size)); 819 820 DPRINTSC(sc, 1, "Window %d Base 0x%016llx: Size 0x%08x\n", 821 window, base, size); 822 823 en &= ~(1 << window); 824 /* set full access (r/w) */ 825 ac |= MVXPE_EPAP_EPAR(window, MVXPE_EPAP_AC_FA); 826 window++; 827 } 828 /* allow to access decode window */ 829 MVXPE_WRITE(sc, MVXPE_EPAP, ac); 830 831 MVXPE_WRITE(sc, MVXPE_BARE, en); 832 } 833 834 /* 835 * Device Register Initialization 836 * reset device registers to device driver default value. 837 * the device is not enabled here. 838 */ 839 STATIC int 840 mvxpe_initreg(struct ifnet *ifp) 841 { 842 struct mvxpe_softc *sc = ifp->if_softc; 843 int serdes = 0; 844 uint32_t reg; 845 int q, i; 846 847 DPRINTIFNET(ifp, 1, "initializing device register\n"); 848 849 /* Init TX/RX Queue Registers */ 850 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 851 mvxpe_rx_lockq(sc, q); 852 if (mvxpe_rx_queue_init(ifp, q) != 0) { 853 aprint_error_ifnet(ifp, 854 "initialization failed: cannot initialize queue\n"); 855 mvxpe_rx_unlockq(sc, q); 856 return ENOBUFS; 857 } 858 mvxpe_rx_unlockq(sc, q); 859 860 mvxpe_tx_lockq(sc, q); 861 if (mvxpe_tx_queue_init(ifp, q) != 0) { 862 aprint_error_ifnet(ifp, 863 "initialization failed: cannot initialize queue\n"); 864 mvxpe_tx_unlockq(sc, q); 865 return ENOBUFS; 866 } 867 mvxpe_tx_unlockq(sc, q); 868 } 869 870 /* Tx MTU Limit */ 871 MVXPE_WRITE(sc, MVXPE_TXMTU, MVXPE_MTU); 872 873 /* Check SGMII or SERDES(assume IPL/U-BOOT initialize this) */ 874 reg = MVXPE_READ(sc, MVXPE_PMACC0); 875 if ((reg & MVXPE_PMACC0_PORTTYPE) != 0) 876 serdes = 1; 877 878 /* Ethernet Unit Control */ 879 reg = MVXPE_READ(sc, MVXPE_EUC); 880 reg |= MVXPE_EUC_POLLING; 881 MVXPE_WRITE(sc, MVXPE_EUC, reg); 882 883 /* Auto Negotiation */ 884 reg = MVXPE_PANC_MUSTSET; /* must write 0x1 */ 885 reg |= MVXPE_PANC_FORCELINKFAIL;/* force link state down */ 886 reg |= MVXPE_PANC_ANSPEEDEN; /* interface speed negotiation */ 887 reg |= MVXPE_PANC_ANDUPLEXEN; /* negotiate duplex mode */ 888 if (serdes) { 889 reg |= MVXPE_PANC_INBANDANEN; /* In Band negotiation */ 890 reg |= MVXPE_PANC_INBANDANBYPASSEN; /* bypass negotiation */ 891 reg |= MVXPE_PANC_SETFULLDX; /* set full-duplex on failure */ 892 } 893 MVXPE_WRITE(sc, MVXPE_PANC, reg); 894 895 /* EEE: Low Power Idle */ 896 reg = MVXPE_LPIC0_LILIMIT(MVXPE_LPI_LI); 897 reg |= MVXPE_LPIC0_TSLIMIT(MVXPE_LPI_TS); 898 MVXPE_WRITE(sc, MVXPE_LPIC0, reg); 899 900 reg = MVXPE_LPIC1_TWLIMIT(MVXPE_LPI_TS); 901 MVXPE_WRITE(sc, MVXPE_LPIC1, reg); 902 903 reg = MVXPE_LPIC2_MUSTSET; 904 MVXPE_WRITE(sc, MVXPE_LPIC2, reg); 905 906 /* Port MAC Control set 0 */ 907 reg = MVXPE_PMACC0_MUSTSET; /* must write 0x1 */ 908 reg &= ~MVXPE_PMACC0_PORTEN; /* port is still disabled */ 909 reg |= MVXPE_PMACC0_FRAMESIZELIMIT(MVXPE_MRU); 910 if (serdes) 911 reg 
|= MVXPE_PMACC0_PORTTYPE;
912 MVXPE_WRITE(sc, MVXPE_PMACC0, reg);
913
914 /* Port MAC Control set 1 is only used for loop-back test */
915
916 /* Port MAC Control set 2 */
917 reg = MVXPE_READ(sc, MVXPE_PMACC2);
918 reg &= (MVXPE_PMACC2_PCSEN | MVXPE_PMACC2_RGMIIEN);
919 reg |= MVXPE_PMACC2_MUSTSET;
920 MVXPE_WRITE(sc, MVXPE_PMACC2, reg);
921
922 /* Port MAC Control set 3 is used for IPG tune */
923
924 /* Port MAC Control set 4 is not used */
925
926 /* Port Configuration */
927 /* Use queue 0 only */
928 reg = MVXPE_READ(sc, MVXPE_PXC);
929 reg &= ~(MVXPE_PXC_RXQ_MASK | MVXPE_PXC_RXQARP_MASK |
930 MVXPE_PXC_TCPQ_MASK | MVXPE_PXC_UDPQ_MASK | MVXPE_PXC_BPDUQ_MASK);
931 MVXPE_WRITE(sc, MVXPE_PXC, reg);
932
933 /* Port Configuration Extended: enable Tx CRC generation */
934 reg = MVXPE_READ(sc, MVXPE_PXCX);
935 reg &= ~MVXPE_PXCX_TXCRCDIS;
936 MVXPE_WRITE(sc, MVXPE_PXCX, reg);
937
938 /* clear MIB counter registers (clear by read) */
939 for (i = 0; i < __arraycount(mvxpe_mib_list); i++)
940 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum));
941
942 /* Set SDC register except IPGINT bits */
943 reg = MVXPE_SDC_RXBSZ_16_64BITWORDS;
944 reg |= MVXPE_SDC_TXBSZ_16_64BITWORDS;
945 reg |= MVXPE_SDC_BLMR;
946 reg |= MVXPE_SDC_BLMT;
947 MVXPE_WRITE(sc, MVXPE_SDC, reg);
948
949 return 0;
950 }
951
952 /*
953 * Descriptor Ring Controls for each queue
954 */
955 STATIC void *
956 mvxpe_dma_memalloc(struct mvxpe_softc *sc, bus_dmamap_t *map, size_t size)
957 {
958 bus_dma_segment_t segs;
959 void *kva = NULL;
960 int nsegs;
961
962 /*
963 * Allocate the descriptor queues.
964 * struct mvxpe_ring_data contains an array of descriptors per queue.
965 */
966 if (bus_dmamem_alloc(sc->sc_dmat,
967 size, PAGE_SIZE, 0, &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
968 aprint_error_dev(sc->sc_dev,
969 "can't alloc device memory (%zu bytes)\n", size);
970 return NULL;
971 }
972 if (bus_dmamem_map(sc->sc_dmat,
973 &segs, nsegs, size, &kva, BUS_DMA_NOWAIT)) {
974 aprint_error_dev(sc->sc_dev,
975 "can't map dma buffers (%zu bytes)\n", size);
976 goto fail1;
977 }
978
979 if (bus_dmamap_create(sc->sc_dmat,
980 size, 1, size, 0, BUS_DMA_NOWAIT, map)) {
981 aprint_error_dev(sc->sc_dev, "can't create dma map\n");
982 goto fail2;
983 }
984 if (bus_dmamap_load(sc->sc_dmat,
985 *map, kva, size, NULL, BUS_DMA_NOWAIT)) {
986 aprint_error_dev(sc->sc_dev, "can't load dma map\n");
987 goto fail3;
988 }
989 memset(kva, 0, size);
990 return kva;
991
992 fail3:
993 bus_dmamap_destroy(sc->sc_dmat, *map);
994 memset(map, 0, sizeof(*map));
995 fail2:
996 bus_dmamem_unmap(sc->sc_dmat, kva, size);
997 fail1:
998 bus_dmamem_free(sc->sc_dmat, &segs, nsegs);
999 return NULL;
1000 }
1001
1002 STATIC int
1003 mvxpe_ring_alloc_queue(struct mvxpe_softc *sc, int q)
1004 {
1005 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1006 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1007
1008 /*
1009 * MVXPE_RX_RING_CNT and MVXPE_TX_RING_CNT are hard limits on the
1010 * queue length.  The real queue length is limited by
1011 * sc->sc_rx_ring[q].rx_queue_len and sc->sc_tx_ring[q].tx_queue_len.
1012 *
1013 * Because descriptor ring reallocation would require reprogramming
1014 * the DMA registers, we allocate enough descriptors for the hard
1015 * limit of the queue length.
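 *
 * (The full MVXPE_RX_RING_CNT/MVXPE_TX_RING_CNT counts are what get
 * programmed into the PRXDQS/PTXDQS size fields below; the shorter
 * run-time rx_queue_len/tx_queue_len values primarily feed the
 * interrupt thresholds derived in mvxpe_ring_init_queue() and can be
 * changed at run time via sysctl (sysctl_set_queue_length()).)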
1016 */
1017 rx->rx_descriptors =
1018 mvxpe_dma_memalloc(sc, &rx->rx_descriptors_map,
1019 (sizeof(struct mvxpe_rx_desc) * MVXPE_RX_RING_CNT));
1020 if (rx->rx_descriptors == NULL)
1021 goto fail;
1022
1023 tx->tx_descriptors =
1024 mvxpe_dma_memalloc(sc, &tx->tx_descriptors_map,
1025 (sizeof(struct mvxpe_tx_desc) * MVXPE_TX_RING_CNT));
1026 if (tx->tx_descriptors == NULL)
1027 goto fail;
1028
1029 return 0;
1030 fail:
1031 mvxpe_ring_dealloc_queue(sc, q);
1032 aprint_error_dev(sc->sc_dev, "DMA Ring buffer allocation failure.\n");
1033 return ENOMEM;
1034 }
1035
1036 STATIC void
1037 mvxpe_ring_dealloc_queue(struct mvxpe_softc *sc, int q)
1038 {
1039 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1040 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1041 bus_dma_segment_t *segs;
1042 bus_size_t size;
1043 void *kva;
1044 int nsegs;
1045
1046 /* Rx */
1047 kva = (void *)MVXPE_RX_RING_MEM_VA(sc, q);
1048 if (kva) {
1049 segs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_segs;
1050 nsegs = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_nsegs;
1051 size = MVXPE_RX_RING_MEM_MAP(sc, q)->dm_mapsize;
1052
1053 bus_dmamap_unload(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1054 bus_dmamap_destroy(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q));
1055 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1056 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1057 }
1058
1059 /* Tx */
1060 kva = (void *)MVXPE_TX_RING_MEM_VA(sc, q);
1061 if (kva) {
1062 segs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_segs;
1063 nsegs = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_nsegs;
1064 size = MVXPE_TX_RING_MEM_MAP(sc, q)->dm_mapsize;
1065
1066 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1067 bus_dmamap_destroy(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q));
1068 bus_dmamem_unmap(sc->sc_dmat, kva, size);
1069 bus_dmamem_free(sc->sc_dmat, segs, nsegs);
1070 }
1071
1072 /* Clear all dangling pointers */
1073 memset(rx, 0, sizeof(*rx));
1074 memset(tx, 0, sizeof(*tx));
1075 }
1076
1077 STATIC void
1078 mvxpe_ring_init_queue(struct mvxpe_softc *sc, int q)
1079 {
1080 struct mvxpe_rx_desc *rxd = MVXPE_RX_RING_MEM_VA(sc, q);
1081 struct mvxpe_tx_desc *txd = MVXPE_TX_RING_MEM_VA(sc, q);
1082 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q);
1083 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q);
1084 static const int rx_default_queue_len[] = {
1085 MVXPE_RX_QUEUE_LIMIT_0, MVXPE_RX_QUEUE_LIMIT_1,
1086 MVXPE_RX_QUEUE_LIMIT_2, MVXPE_RX_QUEUE_LIMIT_3,
1087 MVXPE_RX_QUEUE_LIMIT_4, MVXPE_RX_QUEUE_LIMIT_5,
1088 MVXPE_RX_QUEUE_LIMIT_6, MVXPE_RX_QUEUE_LIMIT_7,
1089 };
1090 static const int tx_default_queue_len[] = {
1091 MVXPE_TX_QUEUE_LIMIT_0, MVXPE_TX_QUEUE_LIMIT_1,
1092 MVXPE_TX_QUEUE_LIMIT_2, MVXPE_TX_QUEUE_LIMIT_3,
1093 MVXPE_TX_QUEUE_LIMIT_4, MVXPE_TX_QUEUE_LIMIT_5,
1094 MVXPE_TX_QUEUE_LIMIT_6, MVXPE_TX_QUEUE_LIMIT_7,
1095 };
1096 extern uint32_t mvTclk;
1097 int i;
1098
1099 /* Rx handle */
1100 for (i = 0; i < MVXPE_RX_RING_CNT; i++) {
1101 MVXPE_RX_DESC(sc, q, i) = &rxd[i];
1102 MVXPE_RX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_rx_desc) * i;
1103 MVXPE_RX_PKTBUF(sc, q, i) = NULL;
1104 }
1105 mutex_init(&rx->rx_ring_mtx, MUTEX_DEFAULT, IPL_NET);
1106 rx->rx_dma = rx->rx_cpu = 0;
1107 rx->rx_queue_len = rx_default_queue_len[q];
1108 if (rx->rx_queue_len > MVXPE_RX_RING_CNT)
1109 rx->rx_queue_len = MVXPE_RX_RING_CNT;
1110 rx->rx_queue_th_received = rx->rx_queue_len / MVXPE_RXTH_RATIO;
1111 rx->rx_queue_th_free = rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO;
1112 rx->rx_queue_th_time = (mvTclk / 1000) / 2; /* 0.5 [ms] */
1113
1114 /* Tx handle */
1115 for (i = 0; i <
MVXPE_TX_RING_CNT; i++) { 1116 MVXPE_TX_DESC(sc, q, i) = &txd[i]; 1117 MVXPE_TX_DESC_OFF(sc, q, i) = sizeof(struct mvxpe_tx_desc) * i; 1118 MVXPE_TX_MBUF(sc, q, i) = NULL; 1119 /* Tx handle needs DMA map for busdma_load_mbuf() */ 1120 if (bus_dmamap_create(sc->sc_dmat, 1121 mvxpbm_chunk_size(sc->sc_bm), 1122 MVXPE_TX_SEGLIMIT, mvxpbm_chunk_size(sc->sc_bm), 0, 1123 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1124 &MVXPE_TX_MAP(sc, q, i))) { 1125 aprint_error_dev(sc->sc_dev, 1126 "can't create dma map (tx ring %d)\n", i); 1127 } 1128 } 1129 mutex_init(&tx->tx_ring_mtx, MUTEX_DEFAULT, IPL_NET); 1130 tx->tx_dma = tx->tx_cpu = 0; 1131 tx->tx_queue_len = tx_default_queue_len[q]; 1132 if (tx->tx_queue_len > MVXPE_TX_RING_CNT) 1133 tx->tx_queue_len = MVXPE_TX_RING_CNT; 1134 tx->tx_used = 0; 1135 tx->tx_queue_th_free = tx->tx_queue_len / MVXPE_TXTH_RATIO; 1136 } 1137 1138 STATIC void 1139 mvxpe_ring_flush_queue(struct mvxpe_softc *sc, int q) 1140 { 1141 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1142 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1143 struct mbuf *m; 1144 int i; 1145 1146 KASSERT_RX_MTX(sc, q); 1147 KASSERT_TX_MTX(sc, q); 1148 1149 /* Rx handle */ 1150 for (i = 0; i < MVXPE_RX_RING_CNT; i++) { 1151 if (MVXPE_RX_PKTBUF(sc, q, i) == NULL) 1152 continue; 1153 mvxpbm_free_chunk(MVXPE_RX_PKTBUF(sc, q, i)); 1154 MVXPE_RX_PKTBUF(sc, q, i) = NULL; 1155 } 1156 rx->rx_dma = rx->rx_cpu = 0; 1157 1158 /* Tx handle */ 1159 for (i = 0; i < MVXPE_TX_RING_CNT; i++) { 1160 m = MVXPE_TX_MBUF(sc, q, i); 1161 if (m == NULL) 1162 continue; 1163 MVXPE_TX_MBUF(sc, q, i) = NULL; 1164 bus_dmamap_sync(sc->sc_dmat, 1165 MVXPE_TX_MAP(sc, q, i), 0, m->m_pkthdr.len, 1166 BUS_DMASYNC_POSTWRITE); 1167 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, i)); 1168 m_freem(m); 1169 } 1170 tx->tx_dma = tx->tx_cpu = 0; 1171 tx->tx_used = 0; 1172 } 1173 1174 STATIC void 1175 mvxpe_ring_sync_rx(struct mvxpe_softc *sc, int q, int idx, int count, int ops) 1176 { 1177 int wrap; 1178 1179 KASSERT_RX_MTX(sc, q); 1180 KASSERT(count > 0 && count <= MVXPE_RX_RING_CNT); 1181 KASSERT(idx >= 0 && idx < MVXPE_RX_RING_CNT); 1182 1183 wrap = (idx + count) - MVXPE_RX_RING_CNT; 1184 if (wrap > 0) { 1185 count -= wrap; 1186 KASSERT(count > 0); 1187 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q), 1188 0, sizeof(struct mvxpe_rx_desc) * wrap, ops); 1189 } 1190 bus_dmamap_sync(sc->sc_dmat, MVXPE_RX_RING_MEM_MAP(sc, q), 1191 MVXPE_RX_DESC_OFF(sc, q, idx), 1192 sizeof(struct mvxpe_rx_desc) * count, ops); 1193 } 1194 1195 STATIC void 1196 mvxpe_ring_sync_tx(struct mvxpe_softc *sc, int q, int idx, int count, int ops) 1197 { 1198 int wrap = 0; 1199 1200 KASSERT_TX_MTX(sc, q); 1201 KASSERT(count > 0 && count <= MVXPE_TX_RING_CNT); 1202 KASSERT(idx >= 0 && idx < MVXPE_TX_RING_CNT); 1203 1204 wrap = (idx + count) - MVXPE_TX_RING_CNT; 1205 if (wrap > 0) { 1206 count -= wrap; 1207 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q), 1208 0, sizeof(struct mvxpe_tx_desc) * wrap, ops); 1209 } 1210 bus_dmamap_sync(sc->sc_dmat, MVXPE_TX_RING_MEM_MAP(sc, q), 1211 MVXPE_TX_DESC_OFF(sc, q, idx), 1212 sizeof(struct mvxpe_tx_desc) * count, ops); 1213 } 1214 1215 /* 1216 * Rx/Tx Queue Control 1217 */ 1218 STATIC int 1219 mvxpe_rx_queue_init(struct ifnet *ifp, int q) 1220 { 1221 struct mvxpe_softc *sc = ifp->if_softc; 1222 uint32_t reg; 1223 1224 KASSERT_RX_MTX(sc, q); 1225 KASSERT(MVXPE_RX_RING_MEM_PA(sc, q) != 0); 1226 1227 /* descriptor address */ 1228 MVXPE_WRITE(sc, MVXPE_PRXDQA(q), MVXPE_RX_RING_MEM_PA(sc, q)); 1229 1230 /* Rx buffer size 
and descriptor ring size */ 1231 reg = MVXPE_PRXDQS_BUFFERSIZE(mvxpbm_chunk_size(sc->sc_bm) >> 3); 1232 reg |= MVXPE_PRXDQS_DESCRIPTORSQUEUESIZE(MVXPE_RX_RING_CNT); 1233 MVXPE_WRITE(sc, MVXPE_PRXDQS(q), reg); 1234 DPRINTIFNET(ifp, 1, "PRXDQS(%d): %#x\n", 1235 q, MVXPE_READ(sc, MVXPE_PRXDQS(q))); 1236 1237 /* Rx packet offset address */ 1238 reg = MVXPE_PRXC_PACKETOFFSET(mvxpbm_packet_offset(sc->sc_bm) >> 3); 1239 MVXPE_WRITE(sc, MVXPE_PRXC(q), reg); 1240 DPRINTIFNET(ifp, 1, "PRXC(%d): %#x\n", 1241 q, MVXPE_READ(sc, MVXPE_PRXC(q))); 1242 1243 /* Rx DMA SNOOP */ 1244 reg = MVXPE_PRXSNP_SNOOPNOOFBYTES(MVXPE_MRU); 1245 reg |= MVXPE_PRXSNP_L2DEPOSITNOOFBYTES(MVXPE_MRU); 1246 MVXPE_WRITE(sc, MVXPE_PRXSNP(q), reg); 1247 1248 /* if DMA is not working, register is not updated */ 1249 KASSERT(MVXPE_READ(sc, MVXPE_PRXDQA(q)) == MVXPE_RX_RING_MEM_PA(sc, q)); 1250 return 0; 1251 } 1252 1253 STATIC int 1254 mvxpe_tx_queue_init(struct ifnet *ifp, int q) 1255 { 1256 struct mvxpe_softc *sc = ifp->if_softc; 1257 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1258 uint32_t reg; 1259 1260 KASSERT_TX_MTX(sc, q); 1261 KASSERT(MVXPE_TX_RING_MEM_PA(sc, q) != 0); 1262 1263 /* descriptor address */ 1264 MVXPE_WRITE(sc, MVXPE_PTXDQA(q), MVXPE_TX_RING_MEM_PA(sc, q)); 1265 1266 /* Tx threshold, and descriptor ring size */ 1267 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); 1268 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT); 1269 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg); 1270 DPRINTIFNET(ifp, 1, "PTXDQS(%d): %#x\n", 1271 q, MVXPE_READ(sc, MVXPE_PTXDQS(q))); 1272 1273 /* if DMA is not working, register is not updated */ 1274 KASSERT(MVXPE_READ(sc, MVXPE_PTXDQA(q)) == MVXPE_TX_RING_MEM_PA(sc, q)); 1275 return 0; 1276 } 1277 1278 STATIC int 1279 mvxpe_rx_queue_enable(struct ifnet *ifp, int q) 1280 { 1281 struct mvxpe_softc *sc = ifp->if_softc; 1282 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1283 uint32_t reg; 1284 1285 KASSERT_RX_MTX(sc, q); 1286 1287 /* Set Rx interrupt threshold */ 1288 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 1289 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free); 1290 MVXPE_WRITE(sc, MVXPE_PRXDQTH(q), reg); 1291 1292 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time); 1293 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg); 1294 1295 /* Unmask RXTX_TH Intr. */ 1296 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1297 reg |= MVXPE_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */ 1298 reg |= MVXPE_PRXTXTI_RDTAQ(q); /* Rx Descriptor Alert */ 1299 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1300 1301 /* Enable Rx queue */ 1302 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK; 1303 reg |= MVXPE_RQC_ENQ(q); 1304 MVXPE_WRITE(sc, MVXPE_RQC, reg); 1305 1306 return 0; 1307 } 1308 1309 STATIC int 1310 mvxpe_tx_queue_enable(struct ifnet *ifp, int q) 1311 { 1312 struct mvxpe_softc *sc = ifp->if_softc; 1313 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1314 uint32_t reg; 1315 1316 KASSERT_TX_MTX(sc, q); 1317 1318 /* Set Tx interrupt threshold */ 1319 reg = MVXPE_READ(sc, MVXPE_PTXDQS(q)); 1320 reg &= ~MVXPE_PTXDQS_TBT_MASK; /* keep queue size */ 1321 reg |= MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); 1322 MVXPE_WRITE(sc, MVXPE_PTXDQS(q), reg); 1323 1324 /* Unmask RXTX_TH Intr. */ 1325 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1326 reg |= MVXPE_PRXTXTI_TBTCQ(q); /* Tx Threshold cross */ 1327 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1328 1329 /* Don't update MVXPE_TQC here, there is no packet yet. 
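 The queue-enable bit (MVXPE_TQC_ENQ) is set later, once a frame has actually been queued; see also the re-kick in mvxpe_watchdog() for the case where the DMA engine stops at the end of the queue.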
*/ 1330 return 0; 1331 } 1332 1333 STATIC void 1334 mvxpe_rx_lockq(struct mvxpe_softc *sc, int q) 1335 { 1336 KASSERT(q >= 0); 1337 KASSERT(q < MVXPE_QUEUE_SIZE); 1338 mutex_enter(&sc->sc_rx_ring[q].rx_ring_mtx); 1339 } 1340 1341 STATIC void 1342 mvxpe_rx_unlockq(struct mvxpe_softc *sc, int q) 1343 { 1344 KASSERT(q >= 0); 1345 KASSERT(q < MVXPE_QUEUE_SIZE); 1346 mutex_exit(&sc->sc_rx_ring[q].rx_ring_mtx); 1347 } 1348 1349 STATIC void 1350 mvxpe_tx_lockq(struct mvxpe_softc *sc, int q) 1351 { 1352 KASSERT(q >= 0); 1353 KASSERT(q < MVXPE_QUEUE_SIZE); 1354 mutex_enter(&sc->sc_tx_ring[q].tx_ring_mtx); 1355 } 1356 1357 STATIC void 1358 mvxpe_tx_unlockq(struct mvxpe_softc *sc, int q) 1359 { 1360 KASSERT(q >= 0); 1361 KASSERT(q < MVXPE_QUEUE_SIZE); 1362 mutex_exit(&sc->sc_tx_ring[q].tx_ring_mtx); 1363 } 1364 1365 /* 1366 * Interrupt Handlers 1367 */ 1368 STATIC void 1369 mvxpe_disable_intr(struct mvxpe_softc *sc) 1370 { 1371 MVXPE_WRITE(sc, MVXPE_EUIM, 0); 1372 MVXPE_WRITE(sc, MVXPE_EUIC, 0); 1373 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, 0); 1374 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, 0); 1375 MVXPE_WRITE(sc, MVXPE_PRXTXIM, 0); 1376 MVXPE_WRITE(sc, MVXPE_PRXTXIC, 0); 1377 MVXPE_WRITE(sc, MVXPE_PMIM, 0); 1378 MVXPE_WRITE(sc, MVXPE_PMIC, 0); 1379 MVXPE_WRITE(sc, MVXPE_PIE, 0); 1380 } 1381 1382 STATIC void 1383 mvxpe_enable_intr(struct mvxpe_softc *sc) 1384 { 1385 uint32_t reg; 1386 1387 /* Enable Port MISC Intr. (via RXTX_TH_Summary bit) */ 1388 reg = MVXPE_READ(sc, MVXPE_PMIM); 1389 reg |= MVXPE_PMI_PHYSTATUSCHNG; 1390 reg |= MVXPE_PMI_LINKCHANGE; 1391 reg |= MVXPE_PMI_IAE; 1392 reg |= MVXPE_PMI_RXOVERRUN; 1393 reg |= MVXPE_PMI_RXCRCERROR; 1394 reg |= MVXPE_PMI_RXLARGEPACKET; 1395 reg |= MVXPE_PMI_TXUNDRN; 1396 #if 0 1397 /* 1398 * The device may raise false interrupts for SERDES even if the device 1399 * is not configured to use SERDES connection. 1400 */ 1401 reg |= MVXPE_PMI_PRBSERROR; 1402 reg |= MVXPE_PMI_SRSE; 1403 #else 1404 reg &= ~MVXPE_PMI_PRBSERROR; 1405 reg &= ~MVXPE_PMI_SRSE; 1406 #endif 1407 reg |= MVXPE_PMI_TREQ_MASK; 1408 MVXPE_WRITE(sc, MVXPE_PMIM, reg); 1409 1410 /* Enable Summary Bit to check all interrupt cause. 
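 With these set, every cause is serviced from the single RXTX_TH vector: mvxpe_rxtxth_intr() reads PRXTXTIC and fans out to mvxpe_misc_intr() and mvxpe_rxtx_intr() through the summary bits.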
*/ 1411 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1412 reg |= MVXPE_PRXTXTI_PMISCICSUMMARY; 1413 reg |= MVXPE_PRXTXTI_PTXERRORSUMMARY; 1414 reg |= MVXPE_PRXTXTI_PRXTXICSUMMARY; 1415 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1416 1417 /* Enable All Queue Interrupt */ 1418 reg = MVXPE_READ(sc, MVXPE_PIE); 1419 reg |= MVXPE_PIE_RXPKTINTRPTENB_MASK; 1420 reg |= MVXPE_PIE_TXPKTINTRPTENB_MASK; 1421 MVXPE_WRITE(sc, MVXPE_PIE, reg); 1422 } 1423 1424 STATIC int 1425 mvxpe_rxtxth_intr(void *arg) 1426 { 1427 struct mvxpe_softc *sc = arg; 1428 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1429 uint32_t ic, queues, datum = 0; 1430 1431 DPRINTSC(sc, 2, "got RXTX_TH_Intr\n"); 1432 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtxth); 1433 1434 mvxpe_sc_lock(sc); 1435 ic = MVXPE_READ(sc, MVXPE_PRXTXTIC); 1436 if (ic == 0) { 1437 mvxpe_sc_unlock(sc); 1438 return 0; 1439 } 1440 MVXPE_WRITE(sc, MVXPE_PRXTXTIC, ~ic); 1441 datum = datum ^ ic; 1442 1443 DPRINTIFNET(ifp, 2, "PRXTXTIC: %#x\n", ic); 1444 1445 /* ack maintenance interrupt first */ 1446 if (ic & MVXPE_PRXTXTI_PTXERRORSUMMARY) { 1447 DPRINTIFNET(ifp, 1, "PRXTXTIC: +PTXERRORSUMMARY\n"); 1448 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtxth_txerr); 1449 } 1450 if ((ic & MVXPE_PRXTXTI_PMISCICSUMMARY)) { 1451 DPRINTIFNET(ifp, 2, "PTXTXTIC: +PMISCICSUMMARY\n"); 1452 mvxpe_misc_intr(sc); 1453 } 1454 if (ic & MVXPE_PRXTXTI_PRXTXICSUMMARY) { 1455 DPRINTIFNET(ifp, 2, "PTXTXTIC: +PRXTXICSUMMARY\n"); 1456 mvxpe_rxtx_intr(sc); 1457 } 1458 if (!(ifp->if_flags & IFF_RUNNING)) { 1459 mvxpe_sc_unlock(sc); 1460 return 1; 1461 } 1462 1463 /* RxTxTH interrupt */ 1464 queues = MVXPE_PRXTXTI_GET_RBICTAPQ(ic); 1465 if (queues) { 1466 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RXEOF\n"); 1467 mvxpe_rx(sc, queues); 1468 } 1469 queues = MVXPE_PRXTXTI_GET_TBTCQ(ic); 1470 if (queues) { 1471 DPRINTIFNET(ifp, 2, "PRXTXTIC: +TBTCQ\n"); 1472 mvxpe_tx_complete(sc, queues); 1473 } 1474 queues = MVXPE_PRXTXTI_GET_RDTAQ(ic); 1475 if (queues) { 1476 DPRINTIFNET(ifp, 2, "PRXTXTIC: +RDTAQ\n"); 1477 mvxpe_rx_refill(sc, queues); 1478 } 1479 mvxpe_sc_unlock(sc); 1480 1481 if_schedule_deferred_start(ifp); 1482 1483 rnd_add_uint32(&sc->sc_rnd_source, datum); 1484 1485 return 1; 1486 } 1487 1488 STATIC int 1489 mvxpe_misc_intr(void *arg) 1490 { 1491 struct mvxpe_softc *sc = arg; 1492 #ifdef MVXPE_DEBUG 1493 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1494 #endif 1495 uint32_t ic; 1496 uint32_t datum = 0; 1497 int claimed = 0; 1498 1499 DPRINTSC(sc, 2, "got MISC_INTR\n"); 1500 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_misc); 1501 1502 KASSERT_SC_MTX(sc); 1503 1504 for (;;) { 1505 ic = MVXPE_READ(sc, MVXPE_PMIC); 1506 ic &= MVXPE_READ(sc, MVXPE_PMIM); 1507 if (ic == 0) 1508 break; 1509 MVXPE_WRITE(sc, MVXPE_PMIC, ~ic); 1510 datum = datum ^ ic; 1511 claimed = 1; 1512 1513 DPRINTIFNET(ifp, 2, "PMIC=%#x\n", ic); 1514 if (ic & MVXPE_PMI_PHYSTATUSCHNG) { 1515 DPRINTIFNET(ifp, 2, "+PHYSTATUSCHNG\n"); 1516 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_phystatuschng); 1517 } 1518 if (ic & MVXPE_PMI_LINKCHANGE) { 1519 DPRINTIFNET(ifp, 2, "+LINKCHANGE\n"); 1520 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_linkchange); 1521 mvxpe_linkupdate(sc); 1522 } 1523 if (ic & MVXPE_PMI_IAE) { 1524 DPRINTIFNET(ifp, 2, "+IAE\n"); 1525 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_iae); 1526 } 1527 if (ic & MVXPE_PMI_RXOVERRUN) { 1528 DPRINTIFNET(ifp, 2, "+RXOVERRUN\n"); 1529 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxoverrun); 1530 } 1531 if (ic & MVXPE_PMI_RXCRCERROR) { 1532 DPRINTIFNET(ifp, 2, "+RXCRCERROR\n"); 1533 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxcrc); 1534 } 1535 if (ic & 
MVXPE_PMI_RXLARGEPACKET) { 1536 DPRINTIFNET(ifp, 2, "+RXLARGEPACKET\n"); 1537 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_rxlargepacket); 1538 } 1539 if (ic & MVXPE_PMI_TXUNDRN) { 1540 DPRINTIFNET(ifp, 2, "+TXUNDRN\n"); 1541 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txunderrun); 1542 } 1543 if (ic & MVXPE_PMI_PRBSERROR) { 1544 DPRINTIFNET(ifp, 2, "+PRBSERROR\n"); 1545 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_prbserr); 1546 } 1547 if (ic & MVXPE_PMI_TREQ_MASK) { 1548 DPRINTIFNET(ifp, 2, "+TREQ\n"); 1549 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_misc_txreq); 1550 } 1551 } 1552 if (datum) 1553 rnd_add_uint32(&sc->sc_rnd_source, datum); 1554 1555 return claimed; 1556 } 1557 1558 STATIC int 1559 mvxpe_rxtx_intr(void *arg) 1560 { 1561 struct mvxpe_softc *sc = arg; 1562 #ifdef MVXPE_DEBUG 1563 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1564 #endif 1565 uint32_t datum = 0; 1566 uint32_t prxtxic; 1567 int claimed = 0; 1568 1569 DPRINTSC(sc, 2, "got RXTX_Intr\n"); 1570 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_i_rxtx); 1571 1572 KASSERT_SC_MTX(sc); 1573 1574 for (;;) { 1575 prxtxic = MVXPE_READ(sc, MVXPE_PRXTXIC); 1576 prxtxic &= MVXPE_READ(sc, MVXPE_PRXTXIM); 1577 if (prxtxic == 0) 1578 break; 1579 MVXPE_WRITE(sc, MVXPE_PRXTXIC, ~prxtxic); 1580 datum = datum ^ prxtxic; 1581 claimed = 1; 1582 1583 DPRINTSC(sc, 2, "PRXTXIC: %#x\n", prxtxic); 1584 1585 if (prxtxic & MVXPE_PRXTXI_RREQ_MASK) { 1586 DPRINTIFNET(ifp, 1, "Rx Resource Error.\n"); 1587 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rreq); 1588 } 1589 if (prxtxic & MVXPE_PRXTXI_RPQ_MASK) { 1590 DPRINTIFNET(ifp, 1, "Rx Packet in Queue.\n"); 1591 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rpq); 1592 } 1593 if (prxtxic & MVXPE_PRXTXI_TBRQ_MASK) { 1594 DPRINTIFNET(ifp, 1, "Tx Buffer Return.\n"); 1595 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_tbrq); 1596 } 1597 if (prxtxic & MVXPE_PRXTXI_PRXTXTHICSUMMARY) { 1598 DPRINTIFNET(ifp, 1, "PRXTXTHIC Summary\n"); 1599 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_rxtxth); 1600 } 1601 if (prxtxic & MVXPE_PRXTXI_PTXERRORSUMMARY) { 1602 DPRINTIFNET(ifp, 1, "PTXERROR Summary\n"); 1603 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_txerr); 1604 } 1605 if (prxtxic & MVXPE_PRXTXI_PMISCICSUMMARY) { 1606 DPRINTIFNET(ifp, 1, "PMISCIC Summary\n"); 1607 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxtx_misc); 1608 } 1609 } 1610 if (datum) 1611 rnd_add_uint32(&sc->sc_rnd_source, datum); 1612 1613 return claimed; 1614 } 1615 1616 STATIC void 1617 mvxpe_tick(void *arg) 1618 { 1619 struct mvxpe_softc *sc = arg; 1620 struct mii_data *mii = &sc->sc_mii; 1621 1622 mvxpe_sc_lock(sc); 1623 1624 mii_tick(mii); 1625 mii_pollstat(&sc->sc_mii); 1626 1627 /* read mib registers(clear by read) */ 1628 mvxpe_update_mib(sc); 1629 1630 /* read counter registers(clear by read) */ 1631 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pdfc, 1632 MVXPE_READ(sc, MVXPE_PDFC)); 1633 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_pofc, 1634 MVXPE_READ(sc, MVXPE_POFC)); 1635 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txbadfcs, 1636 MVXPE_READ(sc, MVXPE_TXBADFCS)); 1637 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_txdropped, 1638 MVXPE_READ(sc, MVXPE_TXDROPPED)); 1639 MVXPE_EVCNT_ADD(&sc->sc_ev.ev_reg_lpic, 1640 MVXPE_READ(sc, MVXPE_LPIC)); 1641 1642 mvxpe_sc_unlock(sc); 1643 1644 callout_schedule(&sc->sc_tick_ch, hz); 1645 } 1646 1647 1648 /* 1649 * struct ifnet and mii callbacks 1650 */ 1651 STATIC void 1652 mvxpe_start(struct ifnet *ifp) 1653 { 1654 struct mvxpe_softc *sc = ifp->if_softc; 1655 struct mbuf *m; 1656 int q; 1657 1658 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) { 1659 DPRINTIFNET(ifp, 1, "not running\n"); 1660 return; 1661 } 1662 1663 
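	/*
	 * Everything below runs under the softc lock; the per-queue Tx mutex
	 * is taken inside mvxpe_tx_queue_select() and released by this
	 * function after mvxpe_tx_queue().
	 */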
mvxpe_sc_lock(sc); 1664 if (!MVXPE_IS_LINKUP(sc)) { 1665 /* If Link is DOWN, can't start TX */ 1666 DPRINTIFNET(ifp, 1, "link fail\n"); 1667 for (;;) { 1668 /* 1669 * discard stale packets all. 1670 * these may confuse DAD, ARP or timer based protocols. 1671 */ 1672 IFQ_DEQUEUE(&ifp->if_snd, m); 1673 if (m == NULL) 1674 break; 1675 m_freem(m); 1676 } 1677 mvxpe_sc_unlock(sc); 1678 return; 1679 } 1680 for (;;) { 1681 /* 1682 * don't use IFQ_POLL(). 1683 * there is lock problem between IFQ_POLL and IFQ_DEQUEUE 1684 * on SMP enabled networking stack. 1685 */ 1686 IFQ_DEQUEUE(&ifp->if_snd, m); 1687 if (m == NULL) 1688 break; 1689 1690 q = mvxpe_tx_queue_select(sc, m); 1691 if (q < 0) 1692 break; 1693 /* mutex is held in mvxpe_tx_queue_select() */ 1694 1695 if (mvxpe_tx_queue(sc, m, q) != 0) { 1696 DPRINTIFNET(ifp, 1, "cannot add packet to tx ring\n"); 1697 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txerr); 1698 mvxpe_tx_unlockq(sc, q); 1699 break; 1700 } 1701 mvxpe_tx_unlockq(sc, q); 1702 KASSERT(sc->sc_tx_ring[q].tx_used >= 0); 1703 KASSERT(sc->sc_tx_ring[q].tx_used <= 1704 sc->sc_tx_ring[q].tx_queue_len); 1705 DPRINTIFNET(ifp, 1, "a packet is added to tx ring\n"); 1706 sc->sc_tx_pending++; 1707 if_statinc(ifp, if_opackets); 1708 ifp->if_timer = 1; 1709 sc->sc_wdogsoft = 1; 1710 bpf_mtap(ifp, m, BPF_D_OUT); 1711 } 1712 mvxpe_sc_unlock(sc); 1713 1714 return; 1715 } 1716 1717 STATIC int 1718 mvxpe_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1719 { 1720 struct mvxpe_softc *sc = ifp->if_softc; 1721 int error = 0; 1722 1723 switch (cmd) { 1724 default: 1725 DPRINTIFNET(ifp, 2, "mvxpe_ioctl ETHER\n"); 1726 error = ether_ioctl(ifp, cmd, data); 1727 if (error == ENETRESET) { 1728 if (ifp->if_flags & IFF_RUNNING) { 1729 mvxpe_sc_lock(sc); 1730 mvxpe_filter_setup(sc); 1731 mvxpe_sc_unlock(sc); 1732 } 1733 error = 0; 1734 } 1735 break; 1736 } 1737 1738 return error; 1739 } 1740 1741 STATIC int 1742 mvxpe_init(struct ifnet *ifp) 1743 { 1744 struct mvxpe_softc *sc = ifp->if_softc; 1745 struct mii_data *mii = &sc->sc_mii; 1746 uint32_t reg; 1747 int q; 1748 1749 mvxpe_sc_lock(sc); 1750 1751 /* Start DMA Engine */ 1752 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000000); 1753 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000000); 1754 MVXPE_WRITE(sc, MVXPE_PACC, MVXPE_PACC_ACCELERATIONMODE_EDM); 1755 1756 /* Enable port */ 1757 reg = MVXPE_READ(sc, MVXPE_PMACC0); 1758 reg |= MVXPE_PMACC0_PORTEN; 1759 MVXPE_WRITE(sc, MVXPE_PMACC0, reg); 1760 1761 /* Link up */ 1762 mvxpe_linkup(sc); 1763 1764 /* Enable All Queue and interrupt of each Queue */ 1765 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 1766 mvxpe_rx_lockq(sc, q); 1767 mvxpe_rx_queue_enable(ifp, q); 1768 mvxpe_rx_queue_refill(sc, q); 1769 mvxpe_rx_unlockq(sc, q); 1770 1771 mvxpe_tx_lockq(sc, q); 1772 mvxpe_tx_queue_enable(ifp, q); 1773 mvxpe_tx_unlockq(sc, q); 1774 } 1775 1776 /* Enable interrupt */ 1777 mvxpe_enable_intr(sc); 1778 1779 /* Set Counter */ 1780 callout_schedule(&sc->sc_tick_ch, hz); 1781 1782 /* Media check */ 1783 mii_mediachg(mii); 1784 1785 ifp->if_flags |= IFF_RUNNING; 1786 ifp->if_flags &= ~IFF_OACTIVE; 1787 1788 mvxpe_sc_unlock(sc); 1789 return 0; 1790 } 1791 1792 /* ARGSUSED */ 1793 STATIC void 1794 mvxpe_stop(struct ifnet *ifp, int disable) 1795 { 1796 struct mvxpe_softc *sc = ifp->if_softc; 1797 uint32_t reg; 1798 int q, cnt; 1799 1800 DPRINTIFNET(ifp, 1, "stop device dma and interrupts.\n"); 1801 1802 mvxpe_sc_lock(sc); 1803 1804 callout_stop(&sc->sc_tick_ch); 1805 1806 /* Link down */ 1807 mvxpe_linkdown(sc); 1808 1809 /* Disable Rx interrupt */ 1810 reg 
= MVXPE_READ(sc, MVXPE_PIE); 1811 reg &= ~MVXPE_PIE_RXPKTINTRPTENB_MASK; 1812 MVXPE_WRITE(sc, MVXPE_PIE, reg); 1813 1814 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1815 reg &= ~MVXPE_PRXTXTI_RBICTAPQ_MASK; 1816 reg &= ~MVXPE_PRXTXTI_RDTAQ_MASK; 1817 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1818 1819 /* Wait for all Rx activity to terminate. */ 1820 reg = MVXPE_READ(sc, MVXPE_RQC) & MVXPE_RQC_EN_MASK; 1821 reg = MVXPE_RQC_DIS(reg); 1822 MVXPE_WRITE(sc, MVXPE_RQC, reg); 1823 cnt = 0; 1824 do { 1825 if (cnt >= RX_DISABLE_TIMEOUT) { 1826 aprint_error_ifnet(ifp, 1827 "timeout for RX stopped. rqc 0x%x\n", reg); 1828 break; 1829 } 1830 cnt++; 1831 reg = MVXPE_READ(sc, MVXPE_RQC); 1832 } while (reg & MVXPE_RQC_EN_MASK); 1833 1834 /* Wait for all Tx activity to terminate. */ 1835 reg = MVXPE_READ(sc, MVXPE_PIE); 1836 reg &= ~MVXPE_PIE_TXPKTINTRPTENB_MASK; 1837 MVXPE_WRITE(sc, MVXPE_PIE, reg); 1838 1839 reg = MVXPE_READ(sc, MVXPE_PRXTXTIM); 1840 reg &= ~MVXPE_PRXTXTI_TBTCQ_MASK; 1841 MVXPE_WRITE(sc, MVXPE_PRXTXTIM, reg); 1842 1843 reg = MVXPE_READ(sc, MVXPE_TQC) & MVXPE_TQC_EN_MASK; 1844 reg = MVXPE_TQC_DIS(reg); 1845 MVXPE_WRITE(sc, MVXPE_TQC, reg); 1846 cnt = 0; 1847 do { 1848 if (cnt >= TX_DISABLE_TIMEOUT) { 1849 aprint_error_ifnet(ifp, 1850 "timeout for TX stopped. tqc 0x%x\n", reg); 1851 break; 1852 } 1853 cnt++; 1854 reg = MVXPE_READ(sc, MVXPE_TQC); 1855 } while (reg & MVXPE_TQC_EN_MASK); 1856 1857 /* Wait for all Tx FIFO is empty */ 1858 cnt = 0; 1859 do { 1860 if (cnt >= TX_FIFO_EMPTY_TIMEOUT) { 1861 aprint_error_ifnet(ifp, 1862 "timeout for TX FIFO drained. ps0 0x%x\n", reg); 1863 break; 1864 } 1865 cnt++; 1866 reg = MVXPE_READ(sc, MVXPE_PS0); 1867 } while (!(reg & MVXPE_PS0_TXFIFOEMP) && (reg & MVXPE_PS0_TXINPROG)); 1868 1869 /* Reset the MAC Port Enable bit */ 1870 reg = MVXPE_READ(sc, MVXPE_PMACC0); 1871 reg &= ~MVXPE_PMACC0_PORTEN; 1872 MVXPE_WRITE(sc, MVXPE_PMACC0, reg); 1873 1874 /* Disable each of queue */ 1875 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 1876 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 1877 1878 mvxpe_rx_lockq(sc, q); 1879 mvxpe_tx_lockq(sc, q); 1880 1881 /* Disable Rx packet buffer refill request */ 1882 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 1883 reg |= MVXPE_PRXDQTH_NODT(0); 1884 MVXPE_WRITE(sc, MVXPE_PRXITTH(q), reg); 1885 1886 if (disable) { 1887 /* 1888 * Hold Reset state of DMA Engine 1889 * (must write 0x0 to restart it) 1890 */ 1891 MVXPE_WRITE(sc, MVXPE_PRXINIT, 0x00000001); 1892 MVXPE_WRITE(sc, MVXPE_PTXINIT, 0x00000001); 1893 mvxpe_ring_flush_queue(sc, q); 1894 } 1895 1896 mvxpe_tx_unlockq(sc, q); 1897 mvxpe_rx_unlockq(sc, q); 1898 } 1899 1900 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1901 1902 mvxpe_sc_unlock(sc); 1903 } 1904 1905 STATIC void 1906 mvxpe_watchdog(struct ifnet *ifp) 1907 { 1908 struct mvxpe_softc *sc = ifp->if_softc; 1909 int q; 1910 1911 mvxpe_sc_lock(sc); 1912 1913 /* 1914 * Reclaim first as there is a possibility of losing Tx completion 1915 * interrupts. 1916 */ 1917 mvxpe_tx_complete(sc, 0xff); 1918 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 1919 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 1920 1921 if (tx->tx_dma != tx->tx_cpu) { 1922 if (sc->sc_wdogsoft) { 1923 /* 1924 * There is race condition between CPU and DMA 1925 * engine. When DMA engine encounters queue end, 1926 * it clears MVXPE_TQC_ENQ bit. 1927 * XXX: how about enhanced mode? 
1928 */ 1929 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 1930 ifp->if_timer = 5; 1931 sc->sc_wdogsoft = 0; 1932 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_wdogsoft); 1933 } else { 1934 aprint_error_ifnet(ifp, "watchdog timeout\n"); 1935 if_statinc(ifp, if_oerrors); 1936 mvxpe_linkreset(sc); 1937 mvxpe_sc_unlock(sc); 1938 1939 /* trigger reinitialize sequence */ 1940 mvxpe_stop(ifp, 1); 1941 mvxpe_init(ifp); 1942 1943 mvxpe_sc_lock(sc); 1944 } 1945 } 1946 } 1947 mvxpe_sc_unlock(sc); 1948 } 1949 1950 STATIC int 1951 mvxpe_ifflags_cb(struct ethercom *ec) 1952 { 1953 struct ifnet *ifp = &ec->ec_if; 1954 struct mvxpe_softc *sc = ifp->if_softc; 1955 u_short change = ifp->if_flags ^ sc->sc_if_flags; 1956 1957 mvxpe_sc_lock(sc); 1958 1959 if (change != 0) 1960 sc->sc_if_flags = ifp->if_flags; 1961 1962 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) { 1963 mvxpe_sc_unlock(sc); 1964 return ENETRESET; 1965 } 1966 1967 if ((change & IFF_PROMISC) != 0) 1968 mvxpe_filter_setup(sc); 1969 1970 if ((change & IFF_UP) != 0) 1971 mvxpe_linkreset(sc); 1972 1973 mvxpe_sc_unlock(sc); 1974 return 0; 1975 } 1976 1977 STATIC int 1978 mvxpe_mediachange(struct ifnet *ifp) 1979 { 1980 return ether_mediachange(ifp); 1981 } 1982 1983 STATIC void 1984 mvxpe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1985 { 1986 ether_mediastatus(ifp, ifmr); 1987 } 1988 1989 /* 1990 * Link State Notify 1991 */ 1992 STATIC void mvxpe_linkupdate(struct mvxpe_softc *sc) 1993 { 1994 int linkup; /* bool */ 1995 1996 KASSERT_SC_MTX(sc); 1997 1998 /* tell miibus */ 1999 mii_pollstat(&sc->sc_mii); 2000 2001 /* syslog */ 2002 linkup = MVXPE_IS_LINKUP(sc); 2003 if (sc->sc_linkstate == linkup) 2004 return; 2005 2006 #ifdef DEBUG 2007 log(LOG_DEBUG, 2008 "%s: link %s\n", device_xname(sc->sc_dev), linkup ? 
"up" : "down"); 2009 #endif 2010 if (linkup) 2011 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_up); 2012 else 2013 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_link_down); 2014 2015 sc->sc_linkstate = linkup; 2016 } 2017 2018 STATIC void 2019 mvxpe_linkup(struct mvxpe_softc *sc) 2020 { 2021 uint32_t reg; 2022 2023 KASSERT_SC_MTX(sc); 2024 2025 /* set EEE parameters */ 2026 reg = MVXPE_READ(sc, MVXPE_LPIC1); 2027 if (sc->sc_cf.cf_lpi) 2028 reg |= MVXPE_LPIC1_LPIRE; 2029 else 2030 reg &= ~MVXPE_LPIC1_LPIRE; 2031 MVXPE_WRITE(sc, MVXPE_LPIC1, reg); 2032 2033 /* set auto-negotiation parameters */ 2034 reg = MVXPE_READ(sc, MVXPE_PANC); 2035 if (sc->sc_cf.cf_fc) { 2036 /* flow control negotiation */ 2037 reg |= MVXPE_PANC_PAUSEADV; 2038 reg |= MVXPE_PANC_ANFCEN; 2039 } 2040 else { 2041 reg &= ~MVXPE_PANC_PAUSEADV; 2042 reg &= ~MVXPE_PANC_ANFCEN; 2043 } 2044 reg &= ~MVXPE_PANC_FORCELINKFAIL; 2045 reg &= ~MVXPE_PANC_FORCELINKPASS; 2046 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2047 2048 mii_mediachg(&sc->sc_mii); 2049 } 2050 2051 STATIC void 2052 mvxpe_linkdown(struct mvxpe_softc *sc) 2053 { 2054 struct mii_softc *mii; 2055 uint32_t reg; 2056 2057 KASSERT_SC_MTX(sc); 2058 return; 2059 2060 reg = MVXPE_READ(sc, MVXPE_PANC); 2061 reg |= MVXPE_PANC_FORCELINKFAIL; 2062 reg &= MVXPE_PANC_FORCELINKPASS; 2063 MVXPE_WRITE(sc, MVXPE_PANC, reg); 2064 2065 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2066 if (mii) 2067 mii_phy_down(mii); 2068 } 2069 2070 STATIC void 2071 mvxpe_linkreset(struct mvxpe_softc *sc) 2072 { 2073 struct mii_softc *mii; 2074 2075 KASSERT_SC_MTX(sc); 2076 2077 /* force reset PHY first */ 2078 mii = LIST_FIRST(&sc->sc_mii.mii_phys); 2079 if (mii) 2080 mii_phy_reset(mii); 2081 2082 /* reinit MAC and PHY */ 2083 mvxpe_linkdown(sc); 2084 if ((sc->sc_if_flags & IFF_UP) != 0) 2085 mvxpe_linkup(sc); 2086 } 2087 2088 /* 2089 * Tx Subroutines 2090 */ 2091 STATIC int 2092 mvxpe_tx_queue_select(struct mvxpe_softc *sc, struct mbuf *m) 2093 { 2094 int q = 0; 2095 2096 /* XXX: get attribute from ALTQ framework? 
*/ 2097 mvxpe_tx_lockq(sc, q); 2098 return 0; 2099 } 2100 2101 STATIC int 2102 mvxpe_tx_queue(struct mvxpe_softc *sc, struct mbuf *m, int q) 2103 { 2104 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2105 bus_dma_segment_t *txsegs; 2106 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2107 struct mvxpe_tx_desc *t = NULL; 2108 uint32_t ptxsu; 2109 int txnsegs; 2110 int start, used; 2111 int i; 2112 2113 KASSERT_TX_MTX(sc, q); 2114 KASSERT(tx->tx_used >= 0); 2115 KASSERT(tx->tx_used <= tx->tx_queue_len); 2116 2117 /* load mbuf using dmamap of 1st descriptor */ 2118 if (bus_dmamap_load_mbuf(sc->sc_dmat, 2119 MVXPE_TX_MAP(sc, q, tx->tx_cpu), m, BUS_DMA_NOWAIT) != 0) { 2120 m_freem(m); 2121 return ENOBUFS; 2122 } 2123 txsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_segs; 2124 txnsegs = MVXPE_TX_MAP(sc, q, tx->tx_cpu)->dm_nsegs; 2125 if (txnsegs <= 0 || (txnsegs + tx->tx_used) > tx->tx_queue_len) { 2126 /* we have no enough descriptors or mbuf is broken */ 2127 bus_dmamap_unload(sc->sc_dmat, MVXPE_TX_MAP(sc, q, tx->tx_cpu)); 2128 m_freem(m); 2129 return ENOBUFS; 2130 } 2131 DPRINTSC(sc, 2, "send packet %p descriptor %d\n", m, tx->tx_cpu); 2132 KASSERT(MVXPE_TX_MBUF(sc, q, tx->tx_cpu) == NULL); 2133 2134 /* remember mbuf using 1st descriptor */ 2135 MVXPE_TX_MBUF(sc, q, tx->tx_cpu) = m; 2136 bus_dmamap_sync(sc->sc_dmat, 2137 MVXPE_TX_MAP(sc, q, tx->tx_cpu), 0, m->m_pkthdr.len, 2138 BUS_DMASYNC_PREWRITE); 2139 2140 /* load to tx descriptors */ 2141 start = tx->tx_cpu; 2142 used = 0; 2143 for (i = 0; i < txnsegs; i++) { 2144 if (__predict_false(txsegs[i].ds_len == 0)) 2145 continue; 2146 t = MVXPE_TX_DESC(sc, q, tx->tx_cpu); 2147 t->command = 0; 2148 t->l4ichk = 0; 2149 t->flags = 0; 2150 if (i == 0) { 2151 /* 1st descriptor */ 2152 t->command |= MVXPE_TX_CMD_W_PACKET_OFFSET(0); 2153 t->command |= MVXPE_TX_CMD_PADDING; 2154 t->command |= MVXPE_TX_CMD_F; 2155 mvxpe_tx_set_csumflag(ifp, t, m); 2156 } 2157 t->bufptr = txsegs[i].ds_addr; 2158 t->bytecnt = txsegs[i].ds_len; 2159 tx->tx_cpu = tx_counter_adv(tx->tx_cpu, 1); 2160 tx->tx_used++; 2161 used++; 2162 } 2163 /* t is last descriptor here */ 2164 KASSERT(t != NULL); 2165 t->command |= MVXPE_TX_CMD_L; 2166 2167 DPRINTSC(sc, 2, "queue %d, %d descriptors used\n", q, used); 2168 #ifdef MVXPE_DEBUG 2169 if (mvxpe_debug > 2) 2170 for (i = start; i <= tx->tx_cpu; i++) { 2171 t = MVXPE_TX_DESC(sc, q, i); 2172 mvxpe_dump_txdesc(t, i); 2173 } 2174 #endif 2175 mvxpe_ring_sync_tx(sc, q, start, used, 2176 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2177 2178 while (used > 255) { 2179 ptxsu = MVXPE_PTXSU_NOWD(255); 2180 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2181 used -= 255; 2182 } 2183 if (used > 0) { 2184 ptxsu = MVXPE_PTXSU_NOWD(used); 2185 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2186 } 2187 MVXPE_WRITE(sc, MVXPE_TQC, MVXPE_TQC_ENQ(q)); 2188 2189 DPRINTSC(sc, 2, 2190 "PTXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQA(q))); 2191 DPRINTSC(sc, 2, 2192 "PTXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXDQS(q))); 2193 DPRINTSC(sc, 2, 2194 "PTXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PTXS(q))); 2195 DPRINTSC(sc, 2, 2196 "PTXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PTXDI(q))); 2197 DPRINTSC(sc, 2, "TQC: %#x\n", MVXPE_READ(sc, MVXPE_TQC)); 2198 DPRINTIFNET(ifp, 2, 2199 "Tx: tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2200 tx->tx_cpu, tx->tx_dma, tx->tx_used); 2201 return 0; 2202 } 2203 2204 STATIC void 2205 mvxpe_tx_set_csumflag(struct ifnet *ifp, 2206 struct mvxpe_tx_desc *t, struct mbuf *m) 2207 { 2208 struct ether_header *eh; 2209 int csum_flags; 
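	/*
	 * Descriptive note (added): ipoff is the byte offset of the L3 header
	 * from the start of the frame (larger when a VLAN tag is present) and
	 * iphl is the IP header length taken from the checksum metadata in
	 * the mbuf; both are handed to the checksum engine through the
	 * descriptor command word at the end of this function.
	 */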
2210 uint32_t iphl = 0, ipoff = 0; 2211 2212 csum_flags = ifp->if_csum_flags_tx & m->m_pkthdr.csum_flags; 2213 2214 eh = mtod(m, struct ether_header *); 2215 switch (htons(eh->ether_type)) { 2216 case ETHERTYPE_IP: 2217 case ETHERTYPE_IPV6: 2218 ipoff = ETHER_HDR_LEN; 2219 break; 2220 case ETHERTYPE_VLAN: 2221 ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2222 break; 2223 } 2224 2225 if (csum_flags & (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) { 2226 iphl = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2227 t->command |= MVXPE_TX_CMD_L3_IP4; 2228 } 2229 else if (csum_flags & (M_CSUM_TCPv6 | M_CSUM_UDPv6)) { 2230 iphl = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2231 t->command |= MVXPE_TX_CMD_L3_IP6; 2232 } 2233 else { 2234 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2235 return; 2236 } 2237 2238 2239 /* L3 */ 2240 if (csum_flags & M_CSUM_IPv4) { 2241 t->command |= MVXPE_TX_CMD_IP4_CHECKSUM; 2242 } 2243 2244 /* L4 */ 2245 if ((csum_flags & 2246 (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) == 0) { 2247 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NONE; 2248 } 2249 else if (csum_flags & M_CSUM_TCPv4) { 2250 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2251 t->command |= MVXPE_TX_CMD_L4_TCP; 2252 } 2253 else if (csum_flags & M_CSUM_UDPv4) { 2254 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2255 t->command |= MVXPE_TX_CMD_L4_UDP; 2256 } 2257 else if (csum_flags & M_CSUM_TCPv6) { 2258 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2259 t->command |= MVXPE_TX_CMD_L4_TCP; 2260 } 2261 else if (csum_flags & M_CSUM_UDPv6) { 2262 t->command |= MVXPE_TX_CMD_L4_CHECKSUM_NOFRAG; 2263 t->command |= MVXPE_TX_CMD_L4_UDP; 2264 } 2265 2266 t->l4ichk = 0; 2267 t->command |= MVXPE_TX_CMD_IP_HEADER_LEN(iphl >> 2); 2268 t->command |= MVXPE_TX_CMD_L3_OFFSET(ipoff); 2269 } 2270 2271 STATIC void 2272 mvxpe_tx_complete(struct mvxpe_softc *sc, uint32_t queues) 2273 { 2274 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2275 int q; 2276 2277 DPRINTSC(sc, 2, "tx completed.\n"); 2278 2279 KASSERT_SC_MTX(sc); 2280 2281 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 2282 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2283 continue; 2284 mvxpe_tx_lockq(sc, q); 2285 mvxpe_tx_queue_complete(sc, q); 2286 mvxpe_tx_unlockq(sc, q); 2287 } 2288 KASSERT(sc->sc_tx_pending >= 0); 2289 if (sc->sc_tx_pending == 0) 2290 ifp->if_timer = 0; 2291 } 2292 2293 STATIC void 2294 mvxpe_tx_queue_complete(struct mvxpe_softc *sc, int q) 2295 { 2296 struct mvxpe_tx_ring *tx = MVXPE_TX_RING(sc, q); 2297 struct mvxpe_tx_desc *t; 2298 struct mbuf *m; 2299 uint32_t ptxs, ptxsu, ndesc; 2300 int i; 2301 2302 KASSERT_TX_MTX(sc, q); 2303 2304 ptxs = MVXPE_READ(sc, MVXPE_PTXS(q)); 2305 ndesc = MVXPE_PTXS_GET_TBC(ptxs); 2306 if (ndesc == 0) 2307 return; 2308 2309 DPRINTSC(sc, 2, 2310 "tx complete queue %d, %d descriptors.\n", q, ndesc); 2311 2312 mvxpe_ring_sync_tx(sc, q, tx->tx_dma, ndesc, 2313 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2314 2315 for (i = 0; i < ndesc; i++) { 2316 int error = 0; 2317 2318 t = MVXPE_TX_DESC(sc, q, tx->tx_dma); 2319 if (t->flags & MVXPE_TX_F_ES) { 2320 DPRINTSC(sc, 1, 2321 "tx error queue %d desc %d\n", 2322 q, tx->tx_dma); 2323 switch (t->flags & MVXPE_TX_F_EC_MASK) { 2324 case MVXPE_TX_F_EC_LC: 2325 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_lc); 2326 break; 2327 case MVXPE_TX_F_EC_UR: 2328 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_ur); 2329 break; 2330 case MVXPE_TX_F_EC_RL: 2331 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_rl); 2332 break; 2333 default: 2334 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_txd_oth); 2335 break; 2336 } 2337 error = 1; 2338 } 
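		/*
		 * Only the first descriptor of a packet (MVXPE_TX_CMD_F set)
		 * carries an mbuf and a loaded DMA map, so the unload and
		 * m_freem() below run once per packet rather than once per
		 * descriptor.
		 */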
2339 m = MVXPE_TX_MBUF(sc, q, tx->tx_dma); 2340 if (m != NULL) { 2341 KASSERT((t->command & MVXPE_TX_CMD_F) != 0); 2342 MVXPE_TX_MBUF(sc, q, tx->tx_dma) = NULL; 2343 bus_dmamap_sync(sc->sc_dmat, 2344 MVXPE_TX_MAP(sc, q, tx->tx_dma), 0, m->m_pkthdr.len, 2345 BUS_DMASYNC_POSTWRITE); 2346 bus_dmamap_unload(sc->sc_dmat, 2347 MVXPE_TX_MAP(sc, q, tx->tx_dma)); 2348 m_freem(m); 2349 sc->sc_tx_pending--; 2350 } 2351 else 2352 KASSERT((t->flags & MVXPE_TX_CMD_F) == 0); 2353 tx->tx_dma = tx_counter_adv(tx->tx_dma, 1); 2354 tx->tx_used--; 2355 if (error) 2356 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txqe[q]); 2357 else 2358 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_txq[q]); 2359 } 2360 KASSERT(tx->tx_used >= 0); 2361 KASSERT(tx->tx_used <= tx->tx_queue_len); 2362 while (ndesc > 255) { 2363 ptxsu = MVXPE_PTXSU_NORB(255); 2364 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2365 ndesc -= 255; 2366 } 2367 if (ndesc > 0) { 2368 ptxsu = MVXPE_PTXSU_NORB(ndesc); 2369 MVXPE_WRITE(sc, MVXPE_PTXSU(q), ptxsu); 2370 } 2371 DPRINTSC(sc, 2, 2372 "Tx complete q %d, tx_cpu = %d, tx_dma = %d, tx_used = %d\n", 2373 q, tx->tx_cpu, tx->tx_dma, tx->tx_used); 2374 } 2375 2376 /* 2377 * Rx Subroutines 2378 */ 2379 STATIC void 2380 mvxpe_rx(struct mvxpe_softc *sc, uint32_t queues) 2381 { 2382 int q, npkt; 2383 2384 KASSERT_SC_MTX(sc); 2385 2386 while ( (npkt = mvxpe_rx_queue_select(sc, queues, &q))) { 2387 /* mutex is held by rx_queue_select */ 2388 mvxpe_rx_queue(sc, q, npkt); 2389 mvxpe_rx_unlockq(sc, q); 2390 } 2391 } 2392 2393 STATIC void 2394 mvxpe_rx_queue(struct mvxpe_softc *sc, int q, int npkt) 2395 { 2396 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2397 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2398 struct mvxpe_rx_desc *r; 2399 struct mvxpbm_chunk *chunk; 2400 struct mbuf *m; 2401 uint32_t prxsu; 2402 int error = 0; 2403 int i; 2404 2405 KASSERT_RX_MTX(sc, q); 2406 2407 mvxpe_ring_sync_rx(sc, q, rx->rx_dma, npkt, 2408 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2409 2410 for (i = 0; i < npkt; i++) { 2411 /* get descriptor and packet */ 2412 chunk = MVXPE_RX_PKTBUF(sc, q, rx->rx_dma); 2413 MVXPE_RX_PKTBUF(sc, q, rx->rx_dma) = NULL; 2414 r = MVXPE_RX_DESC(sc, q, rx->rx_dma); 2415 mvxpbm_dmamap_sync(chunk, r->bytecnt, BUS_DMASYNC_POSTREAD); 2416 2417 /* check errors */ 2418 if (r->status & MVXPE_RX_ES) { 2419 switch (r->status & MVXPE_RX_EC_MASK) { 2420 case MVXPE_RX_EC_CE: 2421 DPRINTIFNET(ifp, 1, "CRC error\n"); 2422 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_ce); 2423 break; 2424 case MVXPE_RX_EC_OR: 2425 DPRINTIFNET(ifp, 1, "Rx FIFO overrun\n"); 2426 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_or); 2427 break; 2428 case MVXPE_RX_EC_MF: 2429 DPRINTIFNET(ifp, 1, "Rx too large frame\n"); 2430 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_mf); 2431 break; 2432 case MVXPE_RX_EC_RE: 2433 DPRINTIFNET(ifp, 1, "Rx resource error\n"); 2434 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_re); 2435 break; 2436 } 2437 error = 1; 2438 goto rx_done; 2439 } 2440 if (!(r->status & MVXPE_RX_F) || !(r->status & MVXPE_RX_L)) { 2441 DPRINTIFNET(ifp, 1, "not support scatter buf\n"); 2442 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_rxd_scat); 2443 error = 1; 2444 goto rx_done; 2445 } 2446 2447 if (chunk == NULL) { 2448 device_printf(sc->sc_dev, 2449 "got rx interrupt, but no chunk\n"); 2450 error = 1; 2451 goto rx_done; 2452 } 2453 2454 /* extract packet buffer */ 2455 if (mvxpbm_init_mbuf_hdr(chunk) != 0) { 2456 error = 1; 2457 goto rx_done; 2458 } 2459 m = chunk->m; 2460 m_set_rcvif(m, ifp); 2461 m->m_pkthdr.len = m->m_len = r->bytecnt - ETHER_CRC_LEN; 2462 m_adj(m, MVXPE_HWHEADER_SIZE); /* strip 
MH */ 2463 mvxpe_rx_set_csumflag(ifp, r, m); 2464 if_percpuq_enqueue(ifp->if_percpuq, m); 2465 chunk = NULL; /* the BM chunk goes to networking stack now */ 2466 rx_done: 2467 if (chunk) { 2468 /* rx error. just return the chunk to BM. */ 2469 mvxpbm_free_chunk(chunk); 2470 } 2471 if (error) 2472 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxqe[q]); 2473 else 2474 MVXPE_EVCNT_INCR(&sc->sc_ev.ev_drv_rxq[q]); 2475 rx->rx_dma = rx_counter_adv(rx->rx_dma, 1); 2476 } 2477 /* DMA status update */ 2478 DPRINTSC(sc, 2, "%d packets received from queue %d\n", npkt, q); 2479 while (npkt > 255) { 2480 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(255); 2481 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2482 npkt -= 255; 2483 } 2484 if (npkt > 0) { 2485 prxsu = MVXPE_PRXSU_NOOFPROCESSEDDESCRIPTORS(npkt); 2486 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2487 } 2488 2489 DPRINTSC(sc, 2, 2490 "PRXDQA: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQA(q))); 2491 DPRINTSC(sc, 2, 2492 "PRXDQS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXDQS(q))); 2493 DPRINTSC(sc, 2, 2494 "PRXS: queue %d, %#x\n", q, MVXPE_READ(sc, MVXPE_PRXS(q))); 2495 DPRINTSC(sc, 2, 2496 "PRXDI: queue %d, %d\n", q, MVXPE_READ(sc, MVXPE_PRXDI(q))); 2497 DPRINTSC(sc, 2, "RQC: %#x\n", MVXPE_READ(sc, MVXPE_RQC)); 2498 DPRINTIFNET(ifp, 2, "Rx: rx_cpu = %d, rx_dma = %d\n", 2499 rx->rx_cpu, rx->rx_dma); 2500 } 2501 2502 STATIC int 2503 mvxpe_rx_queue_select(struct mvxpe_softc *sc, uint32_t queues, int *queue) 2504 { 2505 uint32_t prxs, npkt; 2506 int q; 2507 2508 KASSERT_SC_MTX(sc); 2509 KASSERT(queue != NULL); 2510 DPRINTSC(sc, 2, "selecting rx queue\n"); 2511 2512 for (q = MVXPE_QUEUE_SIZE - 1; q >= 0; q--) { 2513 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2514 continue; 2515 2516 prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); 2517 npkt = MVXPE_PRXS_GET_ODC(prxs); 2518 if (npkt == 0) 2519 continue; 2520 2521 DPRINTSC(sc, 2, 2522 "queue %d selected: prxs=%#x, %u packet received.\n", 2523 q, prxs, npkt); 2524 *queue = q; 2525 mvxpe_rx_lockq(sc, q); 2526 return npkt; 2527 } 2528 2529 return 0; 2530 } 2531 2532 STATIC void 2533 mvxpe_rx_refill(struct mvxpe_softc *sc, uint32_t queues) 2534 { 2535 int q; 2536 2537 KASSERT_SC_MTX(sc); 2538 2539 /* XXX: check rx bit array */ 2540 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 2541 if (!MVXPE_IS_QUEUE_BUSY(queues, q)) 2542 continue; 2543 2544 mvxpe_rx_lockq(sc, q); 2545 mvxpe_rx_queue_refill(sc, q); 2546 mvxpe_rx_unlockq(sc, q); 2547 } 2548 } 2549 2550 STATIC void 2551 mvxpe_rx_queue_refill(struct mvxpe_softc *sc, int q) 2552 { 2553 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2554 uint32_t prxs, prxsu, ndesc; 2555 int idx, refill = 0; 2556 int npkt; 2557 2558 KASSERT_RX_MTX(sc, q); 2559 2560 prxs = MVXPE_READ(sc, MVXPE_PRXS(q)); 2561 ndesc = MVXPE_PRXS_GET_NODC(prxs) + MVXPE_PRXS_GET_ODC(prxs); 2562 refill = rx->rx_queue_len - ndesc; 2563 if (refill <= 0) 2564 return; 2565 DPRINTPRXS(2, q); 2566 DPRINTSC(sc, 2, "%d buffers to refill.\n", refill); 2567 2568 idx = rx->rx_cpu; 2569 for (npkt = 0; npkt < refill; npkt++) 2570 if (mvxpe_rx_queue_add(sc, q) != 0) 2571 break; 2572 DPRINTSC(sc, 2, "queue %d, %d buffer refilled.\n", q, npkt); 2573 if (npkt == 0) 2574 return; 2575 2576 mvxpe_ring_sync_rx(sc, q, idx, npkt, 2577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2578 2579 while (npkt > 255) { 2580 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(255); 2581 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2582 npkt -= 255; 2583 } 2584 if (npkt > 0) { 2585 prxsu = MVXPE_PRXSU_NOOFNEWDESCRIPTORS(npkt); 2586 MVXPE_WRITE(sc, MVXPE_PRXSU(q), prxsu); 2587 } 2588 
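	/*
	 * The "number of new descriptors" field in PRXSU cannot hold more
	 * than 255, which is why the update above is issued in chunks.
	 */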
DPRINTPRXS(2, q); 2589 return; 2590 } 2591 2592 STATIC int 2593 mvxpe_rx_queue_add(struct mvxpe_softc *sc, int q) 2594 { 2595 struct mvxpe_rx_ring *rx = MVXPE_RX_RING(sc, q); 2596 struct mvxpe_rx_desc *r; 2597 struct mvxpbm_chunk *chunk = NULL; 2598 2599 KASSERT_RX_MTX(sc, q); 2600 2601 /* Allocate the packet buffer */ 2602 chunk = mvxpbm_alloc(sc->sc_bm); 2603 if (chunk == NULL) { 2604 DPRINTSC(sc, 1, "BM chunk allocation failed.\n"); 2605 return ENOBUFS; 2606 } 2607 2608 /* Add the packet to descriptor */ 2609 KASSERT(MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) == NULL); 2610 MVXPE_RX_PKTBUF(sc, q, rx->rx_cpu) = chunk; 2611 mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD); 2612 2613 r = MVXPE_RX_DESC(sc, q, rx->rx_cpu); 2614 r->bufptr = chunk->buf_pa; 2615 DPRINTSC(sc, 9, "chunk added to index %d\n", rx->rx_cpu); 2616 rx->rx_cpu = rx_counter_adv(rx->rx_cpu, 1); 2617 return 0; 2618 } 2619 2620 STATIC void 2621 mvxpe_rx_set_csumflag(struct ifnet *ifp, 2622 struct mvxpe_rx_desc *r, struct mbuf *m0) 2623 { 2624 uint32_t csum_flags = 0; 2625 2626 if ((r->status & (MVXPE_RX_IP_HEADER_OK | MVXPE_RX_L3_IP)) == 0) 2627 return; /* not a IP packet */ 2628 2629 /* L3 */ 2630 if (r->status & MVXPE_RX_L3_IP) { 2631 csum_flags |= M_CSUM_IPv4 & ifp->if_csum_flags_rx; 2632 if ((r->status & MVXPE_RX_IP_HEADER_OK) == 0 && 2633 (csum_flags & M_CSUM_IPv4) != 0) { 2634 csum_flags |= M_CSUM_IPv4_BAD; 2635 goto finish; 2636 } 2637 else if (r->status & MVXPE_RX_IPV4_FRAGMENT) { 2638 /* 2639 * r->l4chk has partial checksum of each fragment. 2640 * but there is no way to use it in NetBSD. 2641 */ 2642 return; 2643 } 2644 } 2645 2646 /* L4 */ 2647 switch (r->status & MVXPE_RX_L4_MASK) { 2648 case MVXPE_RX_L4_TCP: 2649 if (r->status & MVXPE_RX_L3_IP) 2650 csum_flags |= M_CSUM_TCPv4 & ifp->if_csum_flags_rx; 2651 else 2652 csum_flags |= M_CSUM_TCPv6 & ifp->if_csum_flags_rx; 2653 break; 2654 case MVXPE_RX_L4_UDP: 2655 if (r->status & MVXPE_RX_L3_IP) 2656 csum_flags |= M_CSUM_UDPv4 & ifp->if_csum_flags_rx; 2657 else 2658 csum_flags |= M_CSUM_UDPv6 & ifp->if_csum_flags_rx; 2659 break; 2660 case MVXPE_RX_L4_OTH: 2661 default: 2662 break; 2663 } 2664 if ((r->status & MVXPE_RX_L4_CHECKSUM_OK) == 0 && (csum_flags & 2665 (M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0) 2666 csum_flags |= M_CSUM_TCP_UDP_BAD; 2667 finish: 2668 m0->m_pkthdr.csum_flags = csum_flags; 2669 } 2670 2671 /* 2672 * MAC address filter 2673 */ 2674 STATIC uint8_t 2675 mvxpe_crc8(const uint8_t *data, size_t size) 2676 { 2677 int bit; 2678 uint8_t byte; 2679 uint8_t crc = 0; 2680 const uint8_t poly = 0x07; 2681 2682 while (size--) 2683 for (byte = *data++, bit = NBBY-1; bit >= 0; bit--) 2684 crc = (crc << 1) ^ ((((crc >> 7) ^ (byte >> bit)) & 1) ? 
poly : 0); 2685 2686 return crc; 2687 } 2688 2689 CTASSERT(MVXPE_NDFSMT == MVXPE_NDFOMT); 2690 2691 STATIC void 2692 mvxpe_filter_setup(struct mvxpe_softc *sc) 2693 { 2694 struct ethercom *ec = &sc->sc_ethercom; 2695 struct ifnet *ifp= &sc->sc_ethercom.ec_if; 2696 struct ether_multi *enm; 2697 struct ether_multistep step; 2698 uint32_t dfut[MVXPE_NDFUT], dfsmt[MVXPE_NDFSMT], dfomt[MVXPE_NDFOMT]; 2699 uint32_t pxc; 2700 int i; 2701 const uint8_t special[ETHER_ADDR_LEN] = {0x01,0x00,0x5e,0x00,0x00,0x00}; 2702 2703 KASSERT_SC_MTX(sc); 2704 2705 memset(dfut, 0, sizeof(dfut)); 2706 memset(dfsmt, 0, sizeof(dfsmt)); 2707 memset(dfomt, 0, sizeof(dfomt)); 2708 2709 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 2710 goto allmulti; 2711 } 2712 2713 ETHER_LOCK(ec); 2714 ETHER_FIRST_MULTI(step, ec, enm); 2715 while (enm != NULL) { 2716 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2717 /* ranges are complex and somewhat rare */ 2718 ETHER_UNLOCK(ec); 2719 goto allmulti; 2720 } 2721 /* chip handles some IPv4 multicast specially */ 2722 if (memcmp(enm->enm_addrlo, special, 5) == 0) { 2723 i = enm->enm_addrlo[5]; 2724 dfsmt[i>>2] |= 2725 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2726 } else { 2727 i = mvxpe_crc8(enm->enm_addrlo, ETHER_ADDR_LEN); 2728 dfomt[i>>2] |= 2729 MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2730 } 2731 2732 ETHER_NEXT_MULTI(step, enm); 2733 } 2734 ETHER_UNLOCK(ec); 2735 goto set; 2736 2737 allmulti: 2738 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { 2739 for (i = 0; i < MVXPE_NDFSMT; i++) { 2740 dfsmt[i] = dfomt[i] = 2741 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2742 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2743 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2744 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2745 } 2746 } 2747 2748 set: 2749 pxc = MVXPE_READ(sc, MVXPE_PXC); 2750 pxc &= ~MVXPE_PXC_UPM; 2751 pxc |= MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP; 2752 if (ifp->if_flags & IFF_BROADCAST) { 2753 pxc &= ~(MVXPE_PXC_RB | MVXPE_PXC_RBIP | MVXPE_PXC_RBARP); 2754 } 2755 if (ifp->if_flags & IFF_PROMISC) { 2756 pxc |= MVXPE_PXC_UPM; 2757 } 2758 MVXPE_WRITE(sc, MVXPE_PXC, pxc); 2759 2760 /* Set Destination Address Filter Unicast Table */ 2761 if (ifp->if_flags & IFF_PROMISC) { 2762 /* pass all unicast addresses */ 2763 for (i = 0; i < MVXPE_NDFUT; i++) { 2764 dfut[i] = 2765 MVXPE_DF(0, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2766 MVXPE_DF(1, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2767 MVXPE_DF(2, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS) | 2768 MVXPE_DF(3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2769 } 2770 } 2771 else { 2772 i = sc->sc_enaddr[5] & 0xf; /* last nibble */ 2773 dfut[i>>2] = MVXPE_DF(i&3, MVXPE_DF_QUEUE(0) | MVXPE_DF_PASS); 2774 } 2775 MVXPE_WRITE_REGION(sc, MVXPE_DFUT(0), dfut, MVXPE_NDFUT); 2776 2777 /* Set Destination Address Filter Multicast Tables */ 2778 MVXPE_WRITE_REGION(sc, MVXPE_DFSMT(0), dfsmt, MVXPE_NDFSMT); 2779 MVXPE_WRITE_REGION(sc, MVXPE_DFOMT(0), dfomt, MVXPE_NDFOMT); 2780 } 2781 2782 /* 2783 * sysctl(9) 2784 */ 2785 SYSCTL_SETUP(sysctl_mvxpe, "sysctl mvxpe subtree setup") 2786 { 2787 int rc; 2788 const struct sysctlnode *node; 2789 2790 if ((rc = sysctl_createv(clog, 0, NULL, &node, 2791 0, CTLTYPE_NODE, "mvxpe", 2792 SYSCTL_DESCR("mvxpe interface controls"), 2793 NULL, 0, NULL, 0, 2794 CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 2795 goto err; 2796 } 2797 2798 mvxpe_root_num = node->sysctl_num; 2799 return; 2800 2801 err: 2802 aprint_error("%s: syctl_createv failed (rc = %d)\n", __func__, rc); 2803 } 2804 
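/*
 * Example (illustrative sketch only, not compiled into the driver): how a
 * non-"special" multicast address ends up in the "other multicast" filter
 * table built by mvxpe_filter_setup() above.  The address is hashed with
 * the CRC-8 (polynomial 0x07) implemented by mvxpe_crc8(); the resulting
 * 8-bit value selects one of the 32-bit table words (crc >> 2) and one of
 * the four byte lanes within that word (crc & 3).  The helper name below is
 * hypothetical and exists only for this example.
 */
#if 0	/* example only */
static void
example_mcast_slot(const uint8_t enaddr[ETHER_ADDR_LEN], int *word, int *lane)
{
	uint8_t crc = 0;
	int i, bit;

	/* same CRC-8 as mvxpe_crc8(): x^8 + x^2 + x + 1 (0x07) */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		for (bit = NBBY - 1; bit >= 0; bit--)
			crc = (crc << 1) ^
			    ((((crc >> 7) ^ (enaddr[i] >> bit)) & 1) ? 0x07 : 0);

	*word = crc >> 2;	/* index into dfomt[] */
	*lane = crc & 3;	/* byte lane passed to MVXPE_DF() */
}
#endif	/* example only */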
2805 STATIC int 2806 sysctl_read_mib(SYSCTLFN_ARGS) 2807 { 2808 struct mvxpe_sysctl_mib *arg; 2809 struct mvxpe_softc *sc; 2810 struct sysctlnode node; 2811 uint64_t val; 2812 int err; 2813 2814 node = *rnode; 2815 arg = (struct mvxpe_sysctl_mib *)rnode->sysctl_data; 2816 if (arg == NULL) 2817 return EINVAL; 2818 2819 sc = arg->sc; 2820 if (sc == NULL) 2821 return EINVAL; 2822 if (arg->index < 0 || arg->index > __arraycount(mvxpe_mib_list)) 2823 return EINVAL; 2824 2825 mvxpe_sc_lock(sc); 2826 val = arg->counter; 2827 mvxpe_sc_unlock(sc); 2828 2829 node.sysctl_data = &val; 2830 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2831 if (err) 2832 return err; 2833 if (newp) 2834 return EINVAL; 2835 2836 return 0; 2837 } 2838 2839 2840 STATIC int 2841 sysctl_clear_mib(SYSCTLFN_ARGS) 2842 { 2843 struct mvxpe_softc *sc; 2844 struct sysctlnode node; 2845 int val; 2846 int err; 2847 2848 node = *rnode; 2849 sc = (struct mvxpe_softc *)rnode->sysctl_data; 2850 if (sc == NULL) 2851 return EINVAL; 2852 2853 val = 0; 2854 node.sysctl_data = &val; 2855 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2856 if (err || newp == NULL) 2857 return err; 2858 if (val < 0 || val > 1) 2859 return EINVAL; 2860 if (val == 1) { 2861 mvxpe_sc_lock(sc); 2862 mvxpe_clear_mib(sc); 2863 mvxpe_sc_unlock(sc); 2864 } 2865 2866 return 0; 2867 } 2868 2869 STATIC int 2870 sysctl_set_queue_length(SYSCTLFN_ARGS) 2871 { 2872 struct mvxpe_sysctl_queue *arg; 2873 struct mvxpe_rx_ring *rx = NULL; 2874 struct mvxpe_tx_ring *tx = NULL; 2875 struct mvxpe_softc *sc; 2876 struct sysctlnode node; 2877 uint32_t reg; 2878 int val; 2879 int err; 2880 2881 node = *rnode; 2882 2883 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2884 if (arg == NULL) 2885 return EINVAL; 2886 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2887 return EINVAL; 2888 if (arg->rxtx != MVXPE_SYSCTL_RX && arg->rxtx != MVXPE_SYSCTL_TX) 2889 return EINVAL; 2890 2891 sc = arg->sc; 2892 if (sc == NULL) 2893 return EINVAL; 2894 2895 /* read queue length */ 2896 mvxpe_sc_lock(sc); 2897 switch (arg->rxtx) { 2898 case MVXPE_SYSCTL_RX: 2899 mvxpe_rx_lockq(sc, arg->queue); 2900 rx = MVXPE_RX_RING(sc, arg->queue); 2901 val = rx->rx_queue_len; 2902 mvxpe_rx_unlockq(sc, arg->queue); 2903 break; 2904 case MVXPE_SYSCTL_TX: 2905 mvxpe_tx_lockq(sc, arg->queue); 2906 tx = MVXPE_TX_RING(sc, arg->queue); 2907 val = tx->tx_queue_len; 2908 mvxpe_tx_unlockq(sc, arg->queue); 2909 break; 2910 } 2911 2912 node.sysctl_data = &val; 2913 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2914 if (err || newp == NULL) { 2915 mvxpe_sc_unlock(sc); 2916 return err; 2917 } 2918 2919 /* update queue length */ 2920 if (val < 8 || val > MVXPE_RX_RING_CNT) { 2921 mvxpe_sc_unlock(sc); 2922 return EINVAL; 2923 } 2924 switch (arg->rxtx) { 2925 case MVXPE_SYSCTL_RX: 2926 mvxpe_rx_lockq(sc, arg->queue); 2927 rx->rx_queue_len = val; 2928 rx->rx_queue_th_received = 2929 rx->rx_queue_len / MVXPE_RXTH_RATIO; 2930 rx->rx_queue_th_free = 2931 rx->rx_queue_len / MVXPE_RXTH_REFILL_RATIO; 2932 2933 reg = MVXPE_PRXDQTH_ODT(rx->rx_queue_th_received); 2934 reg |= MVXPE_PRXDQTH_NODT(rx->rx_queue_th_free); 2935 MVXPE_WRITE(sc, MVXPE_PRXDQTH(arg->queue), reg); 2936 2937 mvxpe_rx_unlockq(sc, arg->queue); 2938 break; 2939 case MVXPE_SYSCTL_TX: 2940 mvxpe_tx_lockq(sc, arg->queue); 2941 tx->tx_queue_len = val; 2942 tx->tx_queue_th_free = 2943 tx->tx_queue_len / MVXPE_TXTH_RATIO; 2944 2945 reg = MVXPE_PTXDQS_TBT(tx->tx_queue_th_free); 2946 reg |= MVXPE_PTXDQS_DQS(MVXPE_TX_RING_CNT); 2947 MVXPE_WRITE(sc, MVXPE_PTXDQS(arg->queue), reg); 
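			/*
			 * Only the software queue length and the transmit
			 * threshold change here; the descriptor queue size
			 * programmed into PTXDQS remains MVXPE_TX_RING_CNT.
			 */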
2948 2949 mvxpe_tx_unlockq(sc, arg->queue); 2950 break; 2951 } 2952 mvxpe_sc_unlock(sc); 2953 2954 return 0; 2955 } 2956 2957 STATIC int 2958 sysctl_set_queue_rxthtime(SYSCTLFN_ARGS) 2959 { 2960 struct mvxpe_sysctl_queue *arg; 2961 struct mvxpe_rx_ring *rx = NULL; 2962 struct mvxpe_softc *sc; 2963 struct sysctlnode node; 2964 extern uint32_t mvTclk; 2965 uint32_t reg, time_mvtclk; 2966 int time_us; 2967 int err; 2968 2969 node = *rnode; 2970 2971 arg = (struct mvxpe_sysctl_queue *)rnode->sysctl_data; 2972 if (arg == NULL) 2973 return EINVAL; 2974 if (arg->queue < 0 || arg->queue > MVXPE_RX_RING_CNT) 2975 return EINVAL; 2976 if (arg->rxtx != MVXPE_SYSCTL_RX) 2977 return EINVAL; 2978 2979 sc = arg->sc; 2980 if (sc == NULL) 2981 return EINVAL; 2982 2983 /* read queue length */ 2984 mvxpe_sc_lock(sc); 2985 mvxpe_rx_lockq(sc, arg->queue); 2986 rx = MVXPE_RX_RING(sc, arg->queue); 2987 time_mvtclk = rx->rx_queue_th_time; 2988 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / mvTclk; 2989 node.sysctl_data = &time_us; 2990 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", 2991 arg->queue, MVXPE_READ(sc, MVXPE_PRXITTH(arg->queue))); 2992 err = sysctl_lookup(SYSCTLFN_CALL(&node)); 2993 if (err || newp == NULL) { 2994 mvxpe_rx_unlockq(sc, arg->queue); 2995 mvxpe_sc_unlock(sc); 2996 return err; 2997 } 2998 2999 /* update queue length (0[sec] - 1[sec]) */ 3000 if (time_us < 0 || time_us > (1000 * 1000)) { 3001 mvxpe_rx_unlockq(sc, arg->queue); 3002 mvxpe_sc_unlock(sc); 3003 return EINVAL; 3004 } 3005 time_mvtclk = 3006 (uint64_t)mvTclk * (uint64_t)time_us / (1000ULL * 1000ULL); 3007 rx->rx_queue_th_time = time_mvtclk; 3008 reg = MVXPE_PRXITTH_RITT(rx->rx_queue_th_time); 3009 MVXPE_WRITE(sc, MVXPE_PRXITTH(arg->queue), reg); 3010 DPRINTSC(sc, 1, "RXITTH(%d) => %#x\n", arg->queue, reg); 3011 mvxpe_rx_unlockq(sc, arg->queue); 3012 mvxpe_sc_unlock(sc); 3013 3014 return 0; 3015 } 3016 3017 3018 STATIC void 3019 sysctl_mvxpe_init(struct mvxpe_softc *sc) 3020 { 3021 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3022 const struct sysctlnode *node; 3023 int mvxpe_nodenum; 3024 int mvxpe_mibnum; 3025 int mvxpe_rxqueuenum; 3026 int mvxpe_txqueuenum; 3027 int q, i; 3028 3029 /* hw.mvxpe.mvxpe[unit] */ 3030 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3031 0, CTLTYPE_NODE, ifp->if_xname, 3032 SYSCTL_DESCR("mvxpe per-controller controls"), 3033 NULL, 0, NULL, 0, 3034 CTL_HW, mvxpe_root_num, CTL_CREATE, 3035 CTL_EOL) != 0) { 3036 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3037 return; 3038 } 3039 mvxpe_nodenum = node->sysctl_num; 3040 3041 /* hw.mvxpe.mvxpe[unit].mib */ 3042 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3043 0, CTLTYPE_NODE, "mib", 3044 SYSCTL_DESCR("mvxpe per-controller MIB counters"), 3045 NULL, 0, NULL, 0, 3046 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, 3047 CTL_EOL) != 0) { 3048 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3049 return; 3050 } 3051 mvxpe_mibnum = node->sysctl_num; 3052 3053 /* hw.mvxpe.mvxpe[unit].rx */ 3054 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3055 0, CTLTYPE_NODE, "rx", 3056 SYSCTL_DESCR("Rx Queues"), 3057 NULL, 0, NULL, 0, 3058 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) { 3059 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3060 return; 3061 } 3062 mvxpe_rxqueuenum = node->sysctl_num; 3063 3064 /* hw.mvxpe.mvxpe[unit].tx */ 3065 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3066 0, CTLTYPE_NODE, "tx", 3067 SYSCTL_DESCR("Tx Queues"), 3068 NULL, 0, NULL, 0, 
3069 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, CTL_EOL) != 0) { 3070 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3071 return; 3072 } 3073 mvxpe_txqueuenum = node->sysctl_num; 3074 3075 #ifdef MVXPE_DEBUG 3076 /* hw.mvxpe.debug */ 3077 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3078 CTLFLAG_READWRITE, CTLTYPE_INT, "debug", 3079 SYSCTL_DESCR("mvxpe device driver debug control"), 3080 NULL, 0, &mvxpe_debug, 0, 3081 CTL_HW, mvxpe_root_num, CTL_CREATE, CTL_EOL) != 0) { 3082 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3083 return; 3084 } 3085 #endif 3086 /* 3087 * MIB access 3088 */ 3089 /* hw.mvxpe.mvxpe[unit].mib.<mibs> */ 3090 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3091 const char *name = mvxpe_mib_list[i].sysctl_name; 3092 const char *desc = mvxpe_mib_list[i].desc; 3093 struct mvxpe_sysctl_mib *mib_arg = &sc->sc_sysctl_mib[i]; 3094 3095 mib_arg->sc = sc; 3096 mib_arg->index = i; 3097 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3098 CTLFLAG_READONLY, CTLTYPE_QUAD, name, desc, 3099 sysctl_read_mib, 0, (void *)mib_arg, 0, 3100 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_mibnum, 3101 CTL_CREATE, CTL_EOL) != 0) { 3102 aprint_normal_dev(sc->sc_dev, 3103 "couldn't create sysctl node\n"); 3104 break; 3105 } 3106 } 3107 3108 for (q = 0; q < MVXPE_QUEUE_SIZE; q++) { 3109 struct mvxpe_sysctl_queue *rxarg = &sc->sc_sysctl_rx_queue[q]; 3110 struct mvxpe_sysctl_queue *txarg = &sc->sc_sysctl_tx_queue[q]; 3111 #define MVXPE_SYSCTL_NAME(num) "queue" # num 3112 static const char *sysctl_queue_names[] = { 3113 MVXPE_SYSCTL_NAME(0), MVXPE_SYSCTL_NAME(1), 3114 MVXPE_SYSCTL_NAME(2), MVXPE_SYSCTL_NAME(3), 3115 MVXPE_SYSCTL_NAME(4), MVXPE_SYSCTL_NAME(5), 3116 MVXPE_SYSCTL_NAME(6), MVXPE_SYSCTL_NAME(7), 3117 }; 3118 #undef MVXPE_SYSCTL_NAME 3119 #ifdef SYSCTL_INCLUDE_DESCR 3120 #define MVXPE_SYSCTL_DESCR(num) "configuration parameters for queue " # num 3121 static const char *sysctl_queue_descrs[] = { 3122 MVXPE_SYSCTL_DESCR(0), MVXPE_SYSCTL_DESCR(1), 3123 MVXPE_SYSCTL_DESCR(2), MVXPE_SYSCTL_DESCR(3), 3124 MVXPE_SYSCTL_DESCR(4), MVXPE_SYSCTL_DESCR(5), 3125 MVXPE_SYSCTL_DESCR(6), MVXPE_SYSCTL_DESCR(7), 3126 }; 3127 #undef MVXPE_SYSCTL_DESCR 3128 #endif /* SYSCTL_INCLUDE_DESCR */ 3129 int mvxpe_curnum; 3130 3131 rxarg->sc = txarg->sc = sc; 3132 rxarg->queue = txarg->queue = q; 3133 rxarg->rxtx = MVXPE_SYSCTL_RX; 3134 txarg->rxtx = MVXPE_SYSCTL_TX; 3135 3136 /* hw.mvxpe.mvxpe[unit].rx.[queue] */ 3137 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3138 0, CTLTYPE_NODE, 3139 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]), 3140 NULL, 0, NULL, 0, 3141 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3142 CTL_CREATE, CTL_EOL) != 0) { 3143 aprint_normal_dev(sc->sc_dev, 3144 "couldn't create sysctl node\n"); 3145 break; 3146 } 3147 mvxpe_curnum = node->sysctl_num; 3148 3149 /* hw.mvxpe.mvxpe[unit].rx.[queue].length */ 3150 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3151 CTLFLAG_READWRITE, CTLTYPE_INT, "length", 3152 SYSCTL_DESCR("maximum length of the queue"), 3153 sysctl_set_queue_length, 0, (void *)rxarg, 0, 3154 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3155 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3156 aprint_normal_dev(sc->sc_dev, 3157 "couldn't create sysctl node\n"); 3158 break; 3159 } 3160 3161 /* hw.mvxpe.mvxpe[unit].rx.[queue].threshold_timer_us */ 3162 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3163 CTLFLAG_READWRITE, CTLTYPE_INT, "threshold_timer_us", 3164 
SYSCTL_DESCR("interrupt coalescing threshold timer [us]"), 3165 sysctl_set_queue_rxthtime, 0, (void *)rxarg, 0, 3166 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_rxqueuenum, 3167 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3168 aprint_normal_dev(sc->sc_dev, 3169 "couldn't create sysctl node\n"); 3170 break; 3171 } 3172 3173 /* hw.mvxpe.mvxpe[unit].tx.[queue] */ 3174 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3175 0, CTLTYPE_NODE, 3176 sysctl_queue_names[q], SYSCTL_DESCR(sysctl_queue_descrs[q]), 3177 NULL, 0, NULL, 0, 3178 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum, 3179 CTL_CREATE, CTL_EOL) != 0) { 3180 aprint_normal_dev(sc->sc_dev, 3181 "couldn't create sysctl node\n"); 3182 break; 3183 } 3184 mvxpe_curnum = node->sysctl_num; 3185 3186 /* hw.mvxpe.mvxpe[unit].tx.[queue].length */ 3187 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3188 CTLFLAG_READWRITE, CTLTYPE_INT, "length", 3189 SYSCTL_DESCR("maximum length of the queue"), 3190 sysctl_set_queue_length, 0, (void *)txarg, 0, 3191 CTL_HW, mvxpe_root_num, mvxpe_nodenum, mvxpe_txqueuenum, 3192 mvxpe_curnum, CTL_CREATE, CTL_EOL) != 0) { 3193 aprint_normal_dev(sc->sc_dev, 3194 "couldn't create sysctl node\n"); 3195 break; 3196 } 3197 } 3198 3199 /* hw.mvxpe.mvxpe[unit].clear_mib */ 3200 if (sysctl_createv(&sc->sc_mvxpe_clog, 0, NULL, &node, 3201 CTLFLAG_READWRITE, CTLTYPE_INT, "clear_mib", 3202 SYSCTL_DESCR("clear mvxpe MIB counters"), 3203 sysctl_clear_mib, 0, (void *)sc, 0, 3204 CTL_HW, mvxpe_root_num, mvxpe_nodenum, CTL_CREATE, 3205 CTL_EOL) != 0) { 3206 aprint_normal_dev(sc->sc_dev, "couldn't create sysctl node\n"); 3207 return; 3208 } 3209 3210 } 3211 3212 /* 3213 * MIB 3214 */ 3215 STATIC void 3216 mvxpe_clear_mib(struct mvxpe_softc *sc) 3217 { 3218 int i; 3219 3220 KASSERT_SC_MTX(sc); 3221 3222 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3223 if (mvxpe_mib_list[i].reg64) 3224 MVXPE_READ_MIB(sc, (mvxpe_mib_list[i].regnum + 4)); 3225 MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3226 sc->sc_sysctl_mib[i].counter = 0; 3227 } 3228 } 3229 3230 STATIC void 3231 mvxpe_update_mib(struct mvxpe_softc *sc) 3232 { 3233 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3234 int i; 3235 3236 KASSERT_SC_MTX(sc); 3237 3238 for (i = 0; i < __arraycount(mvxpe_mib_list); i++) { 3239 uint32_t val_hi; 3240 uint32_t val_lo; 3241 uint64_t val; 3242 3243 if (mvxpe_mib_list[i].reg64) { 3244 /* XXX: implement bus_space_read_8() */ 3245 val_lo = MVXPE_READ_MIB(sc, 3246 (mvxpe_mib_list[i].regnum + 4)); 3247 val_hi = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3248 } 3249 else { 3250 val_lo = MVXPE_READ_MIB(sc, mvxpe_mib_list[i].regnum); 3251 val_hi = 0; 3252 } 3253 3254 if ((val_lo | val_hi) == 0) 3255 continue; 3256 3257 val = ((uint64_t)val_hi << 32) | (uint64_t)val_lo; 3258 sc->sc_sysctl_mib[i].counter += val; 3259 3260 switch (mvxpe_mib_list[i].ext) { 3261 case MVXPE_MIBEXT_IF_OERRORS: 3262 if_statadd(ifp, if_oerrors, val); 3263 break; 3264 case MVXPE_MIBEXT_IF_IERRORS: 3265 if_statadd(ifp, if_ierrors, val); 3266 break; 3267 case MVXPE_MIBEXT_IF_COLLISIONS: 3268 if_statadd(ifp, if_collisions, val); 3269 break; 3270 default: 3271 break; 3272 } 3273 3274 } 3275 } 3276 3277 /* 3278 * for Debug 3279 */ 3280 STATIC void 3281 mvxpe_dump_txdesc(struct mvxpe_tx_desc *desc, int idx) 3282 { 3283 #define DESC_PRINT(X) \ 3284 if (X) \ 3285 printf("txdesc[%d]."
#X "=%#x\n", idx, X); 3286 3287 DESC_PRINT(desc->command); 3288 DESC_PRINT(desc->l4ichk); 3289 DESC_PRINT(desc->bytecnt); 3290 DESC_PRINT(desc->bufptr); 3291 DESC_PRINT(desc->flags); 3292 #undef DESC_PRINT 3293 } 3294 3295 STATIC void 3296 mvxpe_dump_rxdesc(struct mvxpe_rx_desc *desc, int idx) 3297 { 3298 #define DESC_PRINT(X) \ 3299 if (X) \ 3300 printf("rxdesc[%d]." #X "=%#x\n", idx, X); 3301 3302 DESC_PRINT(desc->status); 3303 DESC_PRINT(desc->bytecnt); 3304 DESC_PRINT(desc->bufptr); 3305 DESC_PRINT(desc->l4chk); 3306 #undef DESC_PRINT 3307 } 3308
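
/*
 * Example usage of the sysctl tree created by sysctl_mvxpe_init() (shown
 * here as a sketch; the exact node names follow the interface name, e.g.
 * "mvxpe0", and the value ranges enforced above):
 *
 *	# dump the accumulated MIB counters
 *	sysctl hw.mvxpe.mvxpe0.mib
 *
 *	# reset the MIB counters
 *	sysctl -w hw.mvxpe.mvxpe0.clear_mib=1
 *
 *	# shrink rx queue 0 to 64 descriptors (valid range 8..MVXPE_RX_RING_CNT)
 *	sysctl -w hw.mvxpe.mvxpe0.rx.queue0.length=64
 *
 *	# coalesce rx interrupts on queue 0 for up to 1000 microseconds
 *	sysctl -w hw.mvxpe.mvxpe0.rx.queue0.threshold_timer_us=1000
 */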