/*	$NetBSD: pq3etsec.c,v 1.61 2025/01/07 17:39:45 andvar Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.61 2025/01/07 17:39:45 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_mpc85xx.h"
#include "opt_multiprocessor.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/sysctl.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_offload.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6.h>
#include <netinet/ip6.h>
#endif
#include <netinet6/in6_offload.h>

#include <powerpc/spr.h>
#include <powerpc/booke/spr.h>
#include <powerpc/booke/cpuvar.h>
#include <powerpc/booke/e500var.h>
#include <powerpc/booke/e500reg.h>
#include <powerpc/booke/etsecreg.h>

#define	M_HASFCB		M_LINK2	/* tx packet has FCB prepended */

#define	ETSEC_MAXTXMBUFS	30
#define	ETSEC_NTXSEGS		30
#define	ETSEC_MAXRXMBUFS	511
#define	ETSEC_MINRXMBUFS	32
#define	ETSEC_NRXSEGS		1

#define	IFCAP_RCTRL_IPCSEN	IFCAP_CSUM_IPv4_Rx
#define	IFCAP_RCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Rx	\
				 | IFCAP_CSUM_UDPv4_Rx	\
				 | IFCAP_CSUM_TCPv6_Rx	\
				 | IFCAP_CSUM_UDPv6_Rx)

#define	IFCAP_TCTRL_IPCSEN	IFCAP_CSUM_IPv4_Tx
#define	IFCAP_TCTRL_TUCSEN	(IFCAP_CSUM_TCPv4_Tx	\
				 | IFCAP_CSUM_UDPv4_Tx	\
				 | IFCAP_CSUM_TCPv6_Tx	\
				 | IFCAP_CSUM_UDPv6_Tx)

#define	IFCAP_ETSEC	(IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN \
			 | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN)

#define	M_CSUM_IP	(M_CSUM_CIP | M_CSUM_CTU)
#define	M_CSUM_IP6	(M_CSUM_TCPv6 | M_CSUM_UDPv6)
#define	M_CSUM_TUP	(M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)
#define	M_CSUM_UDP	(M_CSUM_UDPv4 | M_CSUM_UDPv6)
#define	M_CSUM_IP4	(M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)
#define	M_CSUM_CIP	(M_CSUM_IPv4)
#define	M_CSUM_CTU	(M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6)

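/*
 * Transmit and receive rings.  Each ring is a contiguous array of buffer
 * descriptors shared with the ETSEC; the driver walks a producer pointer
 * (next descriptor to hand to the hardware) and a consumer pointer (next
 * descriptor to reclaim) around the array, keeping the mbufs that back
 * each descriptor on a per-ring list.
 */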
struct pq3etsec_txqueue {
	bus_dmamap_t txq_descmap;
	volatile struct txbd *txq_consumer;
	volatile struct txbd *txq_producer;
	volatile struct txbd *txq_first;
	volatile struct txbd *txq_last;
	struct ifqueue txq_mbufs;
	struct mbuf *txq_next;
#ifdef ETSEC_DEBUG
	struct mbuf *txq_lmbufs[512];
#endif
	uint32_t txq_qmask;
	uint32_t txq_free;
	uint32_t txq_threshold;
	uint32_t txq_lastintr;
	bus_size_t txq_reg_tbase;
	bus_dma_segment_t txq_descmap_seg;
};

struct pq3etsec_rxqueue {
	bus_dmamap_t rxq_descmap;
	volatile struct rxbd *rxq_consumer;
	volatile struct rxbd *rxq_producer;
	volatile struct rxbd *rxq_first;
	volatile struct rxbd *rxq_last;
	struct mbuf *rxq_mhead;
	struct mbuf **rxq_mtail;
	struct mbuf *rxq_mconsumer;
#ifdef ETSEC_DEBUG
	struct mbuf *rxq_mbufs[512];
#endif
	uint32_t rxq_qmask;
	uint32_t rxq_inuse;
	uint32_t rxq_threshold;
	bus_size_t rxq_reg_rbase;
	bus_size_t rxq_reg_rbptr;
	bus_dma_segment_t rxq_descmap_seg;
};

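/*
 * A mapcache is a preallocated stack of bus_dma maps, so the tx/rx paths
 * can get and put maps without calling bus_dmamap_create() in the hot
 * path; dmc_maps[] is a variable-length array, sized for dmc_maxmaps
 * entries when the cache is allocated (see pq3etsec_mapcache_create()).
 */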
struct pq3etsec_mapcache {
	u_int dmc_nmaps;
	u_int dmc_maxseg;
	u_int dmc_maxmaps;
	u_int dmc_maxmapsize;
	bus_dmamap_t dmc_maps[0];
};

struct pq3etsec_softc {
	device_t sc_dev;
	device_t sc_mdio_dev;
	struct ethercom sc_ec;
#define	sc_if		sc_ec.ec_if
	struct mii_data sc_mii;
	bus_space_tag_t sc_bst;
	bus_space_handle_t sc_bsh;
	bus_space_handle_t sc_mdio_bsh;
	bus_dma_tag_t sc_dmat;
	int sc_phy_addr;
	prop_dictionary_t sc_intrmap;
	uint32_t sc_intrmask;

	uint32_t sc_soft_flags;
#define	SOFT_RESET		0x0001
#define	SOFT_RXINTR		0x0010
#define	SOFT_RXBSY		0x0020
#define	SOFT_TXINTR		0x0100
#define	SOFT_TXERROR		0x0200

	struct pq3etsec_txqueue sc_txq;
	struct pq3etsec_rxqueue sc_rxq;
	uint32_t sc_txerrors;
	uint32_t sc_rxerrors;

	size_t sc_rx_adjlen;

	/*
	 * Copies of various ETSEC registers.
	 */
	uint32_t sc_imask;
	uint32_t sc_maccfg1;
	uint32_t sc_maccfg2;
	uint32_t sc_maxfrm;
	uint32_t sc_ecntrl;
	uint32_t sc_dmactrl;
	uint32_t sc_macstnaddr1;
	uint32_t sc_macstnaddr2;
	uint32_t sc_tctrl;
	uint32_t sc_rctrl;
	uint32_t sc_gaddr[16];
	uint64_t sc_macaddrs[15];

	void *sc_tx_ih;
	void *sc_rx_ih;
	void *sc_error_ih;
	void *sc_soft_ih;

	kmutex_t *sc_lock;
	kmutex_t *sc_hwlock;

	struct evcnt sc_ev_tx_stall;
	struct evcnt sc_ev_tx_intr;
	struct evcnt sc_ev_rx_stall;
	struct evcnt sc_ev_rx_intr;
	struct evcnt sc_ev_error_intr;
	struct evcnt sc_ev_soft_intr;
	struct evcnt sc_ev_tx_pause;
	struct evcnt sc_ev_rx_pause;
	struct evcnt sc_ev_mii_ticks;

	struct callout sc_mii_callout;
	uint64_t sc_mii_last_tick;

	struct ifqueue sc_rx_bufcache;
	struct pq3etsec_mapcache *sc_rx_mapcache;
	struct pq3etsec_mapcache *sc_tx_mapcache;

	/* Interrupt Coalescing parameters */
	int sc_ic_rx_time;
	int sc_ic_rx_count;
	int sc_ic_tx_time;
	int sc_ic_tx_count;

	krndsource_t rnd_source;
};

#define	ETSEC_IC_RX_ENABLED(sc)						\
	((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0)
#define	ETSEC_IC_TX_ENABLED(sc)						\
	((sc)->sc_ic_tx_time != 0 && (sc)->sc_ic_tx_count != 0)

struct pq3mdio_softc {
	device_t mdio_dev;

	bus_space_tag_t mdio_bst;
	bus_space_handle_t mdio_bsh;
};

static int pq3etsec_match(device_t, cfdata_t, void *);
static void pq3etsec_attach(device_t, device_t, void *);

static int pq3mdio_match(device_t, cfdata_t, void *);
static void pq3mdio_attach(device_t, device_t, void *);

static void pq3etsec_ifstart(struct ifnet *);
static void pq3etsec_ifwatchdog(struct ifnet *);
static int pq3etsec_ifinit(struct ifnet *);
static void pq3etsec_ifstop(struct ifnet *, int);
static int pq3etsec_ifioctl(struct ifnet *, u_long, void *);

static int pq3etsec_mapcache_create(struct pq3etsec_softc *,
    struct pq3etsec_mapcache **, size_t, size_t, size_t);
static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *);
static void pq3etsec_mapcache_put(struct pq3etsec_softc *,
    struct pq3etsec_mapcache *, bus_dmamap_t);

static int pq3etsec_txq_attach(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, u_int);
static void pq3etsec_txq_purge(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static void pq3etsec_txq_reset(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_consume(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);
static bool pq3etsec_txq_produce(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *, struct mbuf *m);
static bool pq3etsec_txq_active_p(struct pq3etsec_softc *,
    struct pq3etsec_txqueue *);

static int pq3etsec_rxq_attach(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, u_int);
static bool pq3etsec_rxq_produce(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);
static void pq3etsec_rxq_purge(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *, bool);
static void pq3etsec_rxq_reset(struct pq3etsec_softc *,
    struct pq3etsec_rxqueue *);

static void pq3etsec_mc_setup(struct pq3etsec_softc *);

static void pq3etsec_mii_tick(void *);
static int pq3etsec_rx_intr(void *);
static int pq3etsec_tx_intr(void *);
static int pq3etsec_error_intr(void *);
static void pq3etsec_soft_intr(void *);

static void pq3etsec_set_ic_rx(struct pq3etsec_softc *);
static void pq3etsec_set_ic_tx(struct pq3etsec_softc *);

static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *);

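/*
 * The MDIO management block is reached differently depending on the SoC:
 * on P1025/P1016 parts it attaches below cpunode, while elsewhere it
 * lives inside an eTSEC ("tsec") instance, hence the two pq3mdio
 * attachment entries below (see pq3mdio_match()).
 */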
CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc),
    pq3etsec_match, pq3etsec_attach, NULL, NULL);

CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc),
    pq3mdio_match, pq3mdio_attach, NULL, NULL);

CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc),
    pq3mdio_match, pq3mdio_attach, NULL, NULL);

static inline uint32_t
etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off)
{
	return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off);
}

static inline void
etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data)
{
	bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data);
}

static inline uint32_t
etsec_read(struct pq3etsec_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off);
}

static int
pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	return strcmp(cf->cf_name, "mdio") == 0;
}

static int
pq3mdio_match(device_t parent, cfdata_t cf, void *aux)
{
	const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16;
	const bool p1025_p = (svr == (SVR_P1025v1 >> 16)
	    || svr == (SVR_P1016v1 >> 16));

	if (device_is_a(parent, "cpunode")) {
		if (!p1025_p
		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
			return 0;

		return 1;
	}

	if (device_is_a(parent, "tsec")) {
		if (p1025_p
		    || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
			return 0;

		return 1;
	}

	return 0;
}

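/*
 * Attach the MDIO controller.  Under cpunode it maps its own register
 * window; under a tsec parent it simply borrows the parent's bus space
 * handle, since the MII management registers live inside the eTSEC
 * register block.
 */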
static void
pq3mdio_attach(device_t parent, device_t self, void *aux)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;

	mdio->mdio_dev = self;

	if (device_is_a(parent, "cpunode")) {
		struct cpunode_softc * const psc = device_private(parent);
		psc->sc_children |= cna->cna_childmask;

		mdio->mdio_bst = cna->cna_memt;
		if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr,
		    cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) {
			aprint_error(": error mapping registers @ %#x\n",
			    cnl->cnl_addr);
			return;
		}
	} else {
		struct pq3etsec_softc * const sc = device_private(parent);

		KASSERT(device_is_a(parent, "tsec"));
		KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE
		    || cnl->cnl_addr == ETSEC2_BASE
		    || cnl->cnl_addr == ETSEC3_BASE
		    || cnl->cnl_addr == ETSEC4_BASE,
		    "unknown tsec addr %x", cnl->cnl_addr);

		mdio->mdio_bst = sc->sc_bst;
		mdio->mdio_bsh = sc->sc_bsh;
	}

	aprint_normal("\n");
}

static int
pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);

	etsec_mdio_write(mdio, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));

	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
	etsec_mdio_write(mdio, MIIMCOM, MIIMCOM_READ);

	while (etsec_mdio_read(mdio, MIIMIND) != 0) {
		delay(1);
	}
	*val = etsec_mdio_read(mdio, MIIMSTAT) & 0xffff;

	if (miimcom == MIIMCOM_SCAN)
		etsec_mdio_write(mdio, MIIMCOM, miimcom);

#if 0
	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, *val);
#endif
	return 0;
}

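/*
 * Write one PHY register: load the PHY and register numbers into
 * MIIMADD, write the value to MIIMCON, then poll MIIMIND until the
 * management interface goes non-busy (bounded to roughly 1ms here
 * rather than spinning forever, unlike the read side above).
 */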
static int
pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data)
{
	struct pq3mdio_softc * const mdio = device_private(self);
	uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM);

#if 0
	aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n",
	    __func__, phy, reg, data);
#endif

	etsec_mdio_write(mdio, MIIMADD,
	    __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG));
	etsec_mdio_write(mdio, MIIMCOM, 0);	/* clear any past bits */
	etsec_mdio_write(mdio, MIIMCON, data);

	int timo = 1000;	/* 1ms */
	while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) {
		delay(1);
	}

	if (miimcom == MIIMCOM_SCAN)
		etsec_mdio_write(mdio, MIIMCOM, miimcom);

	return 0;
}

static inline void
etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data)
{
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data);
}

static void
pq3etsec_mii_statchg(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	uint32_t maccfg1 = sc->sc_maccfg1;
	uint32_t maccfg2 = sc->sc_maccfg2;
	uint32_t ecntrl = sc->sc_ecntrl;

	maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
	maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		maccfg2 |= MACCFG2_FD;
	}

	/*
	 * Now deal with the flow control bits.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO
	    && (mii->mii_media_active & IFM_ETH_FMASK)) {
		if (mii->mii_media_active & IFM_ETH_RXPAUSE)
			maccfg1 |= MACCFG1_RX_FLOW;
		if (mii->mii_media_active & IFM_ETH_TXPAUSE)
			maccfg1 |= MACCFG1_TX_FLOW;
	}

	/*
	 * Now deal with the speed.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		maccfg2 |= MACCFG2_IFMODE_GMII;
	} else {
		maccfg2 |= MACCFG2_IFMODE_MII;
		ecntrl &= ~ECNTRL_R100M;
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) {
			ecntrl |= ECNTRL_R100M;
		}
	}

	/*
	 * If things are different, re-init things.
	 */
	if (maccfg1 != sc->sc_maccfg1
	    || maccfg2 != sc->sc_maccfg2
	    || ecntrl != sc->sc_ecntrl) {
		if (sc->sc_if.if_flags & IFF_RUNNING)
			atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET);
		sc->sc_maccfg1 = maccfg1;
		sc->sc_maccfg2 = maccfg2;
		sc->sc_ecntrl = ecntrl;
	}
}

#if 0
static void
pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ether_mediastatus(ifp, ifmr);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

static int
pq3etsec_mediachange(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	int rv = mii_mediachg(&sc->sc_mii);
	return (rv == ENXIO) ? 0 : rv;
}
#endif

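/*
 * The disabled media handlers above appear to predate the generic
 * ether_mediachange()/ether_mediastatus(), which pq3etsec_attach()
 * now passes to ifmedia_init() instead.
 */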
static int
pq3etsec_match(device_t parent, cfdata_t cf, void *aux)
{

	if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux))
		return 0;

	return 1;
}

static void
pq3etsec_attach(device_t parent, device_t self, void *aux)
{
	struct cpunode_softc * const psc = device_private(parent);
	struct pq3etsec_softc * const sc = device_private(self);
	struct mii_data * const mii = &sc->sc_mii;
	struct cpunode_attach_args * const cna = aux;
	struct cpunode_locators * const cnl = &cna->cna_locs;
	cfdata_t cf = device_cfdata(self);
	int error;

	psc->sc_children |= cna->cna_childmask;
	sc->sc_dev = self;
	sc->sc_bst = cna->cna_memt;
	sc->sc_dmat = &booke_bus_dma_tag;

	/*
	 * Pull out the mdio bus and phy we are supposed to use.
	 */
	const int mdio = cf->cf_loc[CPUNODECF_MDIO];
	const int phy = cf->cf_loc[CPUNODECF_PHY];
	if (mdio != CPUNODECF_MDIO_DEFAULT)
		aprint_normal(" mdio %d", mdio);

	/*
	 * See if the phy is in the config file...
	 */
	if (phy != CPUNODECF_PHY_DEFAULT) {
		sc->sc_phy_addr = phy;
	} else {
		unsigned char prop_name[20];
		snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr",
		    cnl->cnl_instance);
		sc->sc_phy_addr = board_info_get_number(prop_name);
	}
	if (sc->sc_phy_addr != MII_PHY_ANY)
		aprint_normal(" phy %d", sc->sc_phy_addr);

	error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0,
	    &sc->sc_bsh);
	if (error) {
		aprint_error(": error mapping registers: %d\n", error);
		return;
	}

	/*
	 * Assume firmware has already set the mac address and fetch it
	 * before we reinit it.
	 */
	sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2);
	sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1);
	sc->sc_rctrl = RCTRL_DEFAULT;
	sc->sc_ecntrl = etsec_read(sc, ECNTRL);
	sc->sc_maccfg1 = etsec_read(sc, MACCFG1);
	sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT;

	if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) {
		size_t len;
		const uint8_t *mac_addr =
		    board_info_get_data("tsec-mac-addr-base", &len);
		KASSERT(len == ETHER_ADDR_LEN);
		sc->sc_macstnaddr2 =
		    (mac_addr[1] << 24)
		    | (mac_addr[0] << 16);
		sc->sc_macstnaddr1 =
		    ((mac_addr[5] + cnl->cnl_instance - 1) << 24)
		    | (mac_addr[4] << 16)
		    | (mac_addr[3] << 8)
		    | (mac_addr[2] << 0);
#if 0
		aprint_error(": mac-address unknown\n");
		return;
#endif
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc);

	/* Disable interrupts */
	etsec_write(sc, IMASK, 0);

	error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0);
	if (error) {
		aprint_error(": failed to init rxq: %d\n", error);
		goto fail_1;
	}

	error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0);
	if (error) {
		aprint_error(": failed to init txq: %d\n", error);
		goto fail_2;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache,
	    ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS);
	if (error) {
		aprint_error(": failed to allocate rx dmamaps: %d\n", error);
		goto fail_3;
	}

	error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
	    ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS);
	if (error) {
		aprint_error(": failed to allocate tx dmamaps: %d\n", error);
		goto fail_4;
	}

	sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP,
	    pq3etsec_tx_intr, sc);
	if (sc->sc_tx_ih == NULL) {
		aprint_error(": failed to establish tx interrupt: %d\n",
		    cnl->cnl_intrs[0]);
		goto fail_5;
	}

	sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP,
	    pq3etsec_rx_intr, sc);
	if (sc->sc_rx_ih == NULL) {
		aprint_error(": failed to establish rx interrupt: %d\n",
		    cnl->cnl_intrs[1]);
		goto fail_6;
	}

	sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP,
	    pq3etsec_error_intr, sc);
	if (sc->sc_error_ih == NULL) {
		aprint_error(": failed to establish error interrupt: %d\n",
		    cnl->cnl_intrs[2]);
		goto fail_7;
	}

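	/*
	 * The tx/rx/error handlers above run at IPL_VM; per-packet work
	 * is pushed off to this soft interrupt (see the SOFT_* flags in
	 * struct pq3etsec_softc).
	 */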
	int softint_flags = SOFTINT_NET;
#if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE)
	softint_flags |= SOFTINT_MPSAFE;
#endif	/* !MULTIPROCESSOR || NET_MPSAFE */
	sc->sc_soft_ih = softint_establish(softint_flags,
	    pq3etsec_soft_intr, sc);
	if (sc->sc_soft_ih == NULL) {
		aprint_error(": failed to establish soft interrupt\n");
		goto fail_8;
	}

	/*
	 * If there was no MDIO locator in the config file, search for
	 * and attach an mdio device of our own; otherwise hook up to
	 * the mdio device named there.
	 */
	if (mdio == CPUNODECF_MDIO_DEFAULT) {
		aprint_normal("\n");
		cfdata_t mdio_cf = config_search(self, cna,
		    CFARGS(.submatch = pq3mdio_find));
		if (mdio_cf != NULL) {
			sc->sc_mdio_dev =
			    config_attach(self, mdio_cf, cna, NULL,
				CFARGS_NONE);
		}
	} else {
		sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio);
		if (sc->sc_mdio_dev == NULL) {
			aprint_error(": failed to locate mdio device\n");
			goto fail_9;
		}
		aprint_normal("\n");
	}

	etsec_write(sc, ATTR, ATTR_DEFAULT);
	etsec_write(sc, ATTRELI, ATTRELI_DEFAULT);

	/* Enable interrupt coalescing */
	sc->sc_ic_rx_time = 768;
	sc->sc_ic_rx_count = 16;
	sc->sc_ic_tx_time = 768;
	sc->sc_ic_tx_count = 16;
	pq3etsec_set_ic_rx(sc);
	pq3etsec_set_ic_tx(sc);

	char enaddr[ETHER_ADDR_LEN] = {
		[0] = sc->sc_macstnaddr2 >> 16,
		[1] = sc->sc_macstnaddr2 >> 24,
		[2] = sc->sc_macstnaddr1 >> 0,
		[3] = sc->sc_macstnaddr1 >> 8,
		[4] = sc->sc_macstnaddr1 >> 16,
		[5] = sc->sc_macstnaddr1 >> 24,
	};
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	const char * const xname = device_xname(sc->sc_dev);
	struct ethercom * const ec = &sc->sc_ec;
	struct ifnet * const ifp = &ec->ec_if;

	ec->ec_mii = mii;

	mii->mii_ifp = ifp;
	mii->mii_readreg = pq3mdio_mii_readreg;
	mii->mii_writereg = pq3mdio_mii_writereg;
	mii->mii_statchg = pq3etsec_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) {
		mii_attach(sc->sc_mdio_dev, mii, 0xffffffff,
		    sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE);

		if (LIST_FIRST(&mii->mii_phys) == NULL) {
			ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE,
			    0, NULL);
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
		} else {
			callout_schedule(&sc->sc_mii_callout, hz);
			ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
		}
	} else {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX,
		    0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX);
	}

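	/*
	 * Advertise the hardware's VLAN and jumbo frame support, but only
	 * turn hardware VLAN tagging on by default.
	 */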
	ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING
	    | ETHERCAP_JUMBO_MTU;
	ec->ec_capenable = ETHERCAP_VLAN_HWTAGGING;

	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_capabilities = IFCAP_ETSEC;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pq3etsec_ifioctl;
	ifp->if_start = pq3etsec_ifstart;
	ifp->if_watchdog = pq3etsec_ifwatchdog;
	ifp->if_init = pq3etsec_ifinit;
	ifp->if_stop = pq3etsec_ifstop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_initialize(ifp);
	pq3etsec_sysctl_setup(NULL, sc);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	pq3etsec_ifstop(ifp, true);

	evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "rx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "tx stall");
	evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "error intr");
	evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR,
	    NULL, xname, "soft intr");
	evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "tx pause");
	evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC,
	    NULL, xname, "rx pause");
	evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC,
	    NULL, xname, "mii ticks");
	return;

fail_9:
	softint_disestablish(sc->sc_soft_ih);
fail_8:
	intr_disestablish(sc->sc_error_ih);
fail_7:
	intr_disestablish(sc->sc_rx_ih);
fail_6:
	intr_disestablish(sc->sc_tx_ih);
fail_5:
	pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
fail_4:
	pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache);
fail_3:
#if 0	/* notyet */
	pq3etsec_txq_detach(sc);
#endif
fail_2:
#if 0	/* notyet */
	pq3etsec_rxq_detach(sc);
#endif
fail_1:
	callout_destroy(&sc->sc_mii_callout);
	mutex_obj_free(sc->sc_lock);
	mutex_obj_free(sc->sc_hwlock);
	bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size);
}

static uint64_t
pq3etsec_macaddr_create(const uint8_t *lladdr)
{
	uint64_t macaddr = 0;

	lladdr += ETHER_ADDR_LEN;
	for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) {
		macaddr = (macaddr << 8) | *--lladdr;
	}
	return macaddr << 16;
}

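/*
 * pq3etsec_macaddr_create() above packs the link-layer address
 * byte-reversed into the top 48 bits of a uint64_t so the high and low
 * words can be written straight to MACSTNADDR1/MACSTNADDR2 (and
 * MACnADDR1/MACnADDR2).  For example, 00:04:9f:01:02:03 becomes
 * MACSTNADDR1 = 0x0302019f and MACSTNADDR2 = 0x04000000, the inverse
 * of how pq3etsec_attach() reassembles enaddr[].
 */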
static int
pq3etsec_ifinit(struct ifnet *ifp)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;
	int error = 0;

	sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES);
	if (ifp->if_mtu > ETHERMTU_JUMBO)
		return error;

	KASSERT(ifp->if_flags & IFF_UP);

	/*
	 * Stop the interface (steps 1 to 4 in the Soft Reset and
	 * Reconfiguring Procedure).
	 */
	pq3etsec_ifstop(ifp, 0);

	/*
	 * If our frame size has changed (or it's our first time through)
	 * destroy the existing transmit mapcache.
	 */
	if (sc->sc_tx_mapcache != NULL
	    && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) {
		pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache);
		sc->sc_tx_mapcache = NULL;
	}

	if (sc->sc_tx_mapcache == NULL) {
		error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache,
		    ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS);
		if (error)
			return error;
	}

	sc->sc_ev_mii_ticks.ev_count++;
	mii_tick(&sc->sc_mii);

	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctrl |= RCTRL_PROM;
	} else {
		sc->sc_rctrl &= ~RCTRL_PROM;
	}

	uint32_t rctrl_prsdep = 0;
	sc->sc_rctrl &=
	    ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP);
	if (VLAN_ATTACHED(&sc->sc_ec)) {
		sc->sc_rctrl |= RCTRL_VLEX;
		rctrl_prsdep = RCTRL_PRSDEP_L2;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) {
		sc->sc_rctrl |= RCTRL_IPCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L3;
	}
	if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) {
		sc->sc_rctrl |= RCTRL_TUCSEN;
		rctrl_prsdep = RCTRL_PRSDEP_L4;
	}
	sc->sc_rctrl |= rctrl_prsdep;
#if 0
	if (sc->sc_rctrl
	    & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP))
		aprint_normal_dev(sc->sc_dev,
		    "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n",
		    sc->sc_rctrl,
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX),
		    __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP));
#endif

	sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS);
	if (VLAN_ATTACHED(&sc->sc_ec))		/* is this really true */
		sc->sc_tctrl |= TCTRL_VLINS;
	if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN)
		sc->sc_tctrl |= TCTRL_IPCSEN;
	if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN)
		sc->sc_tctrl |= TCTRL_TUCSEN;
#if 0
	if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS))
		aprint_normal_dev(sc->sc_dev,
		    "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n",
		    sc->sc_tctrl,
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN),
		    __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS));
#endif

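	/*
	 * The numbered comments below (5 through 16) continue the Soft
	 * Reset and Reconfiguring Procedure begun by the pq3etsec_ifstop()
	 * call above (steps 1 to 4).
	 */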
	sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN);

	const uint64_t macstnaddr =
	    pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl));

	sc->sc_imask = IEVENT_DPE;

	/* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */
	pq3etsec_rxq_reset(sc, &sc->sc_rxq);
	pq3etsec_rxq_produce(sc, &sc->sc_rxq);	/* fill with rx buffers */

	/* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */
	pq3etsec_txq_reset(sc, &sc->sc_txq);

	/* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) */
	KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2);
	etsec_write(sc, MAXFRM, sc->sc_maxfrm);
	etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32));
	etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0));
	etsec_write(sc, MACCFG1, sc->sc_maccfg1);
	etsec_write(sc, MACCFG2, sc->sc_maccfg2);
	etsec_write(sc, ECNTRL, sc->sc_ecntrl);

	/* 8. Setup group address hash table (GADDR0-GADDR15) */
	pq3etsec_mc_setup(sc);

	/* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */
	etsec_write(sc, MRBLR, MCLBYTES);

	/* 10. Setup WWR, WOP, TOD bits in DMACTRL register */
	sc->sc_dmactrl |= DMACTRL_DEFAULT;
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	/*
	 * 11. Enable transmit queues in TQUEUE, and ensure that the
	 * transmit scheduling mode is correctly set in TCTRL.
	 */
	etsec_write(sc, TQUEUE, TQUEUE_EN0);
	sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC;

	etsec_write(sc, TCTRL, sc->sc_tctrl);	/* for TOE stuff */

	/* 12. Enable receive queues in RQUEUE, */
	etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0);
	sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC;

	/* and optionally set TOE functionality in RCTRL. */
	etsec_write(sc, RCTRL, sc->sc_rctrl);
	sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL);
	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF)
		sc->sc_rx_adjlen += sizeof(struct rxfcb);

	/* 13. Clear THLT and TXF bits in TSTAT register by writing 1 to them */
	etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF);

	/* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them */
	etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF);

	/* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */
	sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	/* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */
	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN | MACCFG1_RX_EN);
	etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN | MACCFG1_RX_EN);

	sc->sc_soft_flags = 0;

	etsec_write(sc, IMASK, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;

	return error;
}

static void
pq3etsec_ifstop(struct ifnet *ifp, int disable)
{
	struct pq3etsec_softc * const sc = ifp->if_softc;

	KASSERT(!cpu_intr_p());
	const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC;
	/*
	 * Clear the GTSC and GRSC from the interrupt mask until
	 * we are ready for them.  Then clear them from IEVENT,
	 * request the graceful shutdown, and then enable the
	 * GTSC and GRSC bits in the mask.  This should cause the
	 * error interrupt to fire which will issue a wakeup to
	 * allow us to resume.
	 */

	/*
	 * 1. Set GRS/GTS bits in DMACTRL register
	 */
	sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS;
	etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask);
	etsec_write(sc, IEVENT, imask_gsc_mask);
	etsec_write(sc, DMACTRL, sc->sc_dmactrl);

	if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) {
		/*
		 * 2. Poll GRSC/GTSC bits in IEVENT register until both are set
		 */
		etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask);

		u_int timo = 1000;
		uint32_t ievent = etsec_read(sc, IEVENT);
		while ((ievent & imask_gsc_mask) != imask_gsc_mask) {
			if (--timo == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: "
				    "request to stop failed (IEVENT=%#x)\n",
				    ievent);
				break;
			}
			delay(10);
			ievent = etsec_read(sc, IEVENT);
		}
	}

	/*
	 * Now reset the controller.
	 *
	 * 3. Set SOFT_RESET bit in MACCFG1 register
	 * 4. Clear SOFT_RESET bit in MACCFG1 register
	 */
	etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET);
	etsec_write(sc, MACCFG1, 0);
	etsec_write(sc, IMASK, 0);
	etsec_write(sc, IEVENT, ~0);
	sc->sc_imask = 0;
	ifp->if_flags &= ~IFF_RUNNING;

	uint32_t tbipa = etsec_read(sc, TBIPA);
	if (tbipa == sc->sc_phy_addr) {
		aprint_normal_dev(sc->sc_dev, "relocating TBI\n");
		etsec_write(sc, TBIPA, 0x1f);
	}
	uint32_t miimcfg = etsec_read(sc, MIIMCFG);
	etsec_write(sc, MIIMCFG, MIIMCFG_RESET);
	etsec_write(sc, MIIMCFG, miimcfg);

1098 1.2 matt * If we aren't disabling the interface, save the mbufs in the 1099 1.2 matt * receive queue for reuse. 1100 1.2 matt */ 1101 1.2 matt pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1102 1.2 matt pq3etsec_txq_consume(sc, &sc->sc_txq); 1103 1.2 matt if (disable) { 1104 1.2 matt pq3etsec_txq_purge(sc, &sc->sc_txq); 1105 1.20 nonaka IFQ_PURGE(&ifp->if_snd); 1106 1.2 matt } 1107 1.2 matt } 1108 1.2 matt 1109 1.2 matt static void 1110 1.2 matt pq3etsec_ifwatchdog(struct ifnet *ifp) 1111 1.2 matt { 1112 1.2 matt } 1113 1.2 matt 1114 1.2 matt static void 1115 1.2 matt pq3etsec_mc_setup( 1116 1.2 matt struct pq3etsec_softc *sc) 1117 1.2 matt { 1118 1.2 matt struct ethercom * const ec = &sc->sc_ec; 1119 1.2 matt struct ifnet * const ifp = &sc->sc_if; 1120 1.2 matt struct ether_multi *enm; 1121 1.2 matt struct ether_multistep step; 1122 1.2 matt uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1123 1.2 matt const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1124 1.2 matt 1125 1.2 matt memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1126 1.2 matt memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1127 1.2 matt 1128 1.2 matt ifp->if_flags &= ~IFF_ALLMULTI; 1129 1.2 matt 1130 1.45 msaitoh ETHER_LOCK(ec); 1131 1.2 matt ETHER_FIRST_MULTI(step, ec, enm); 1132 1.2 matt for (u_int i = 0; enm != NULL; ) { 1133 1.2 matt const char *addr = enm->enm_addrlo; 1134 1.2 matt if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1135 1.2 matt ifp->if_flags |= IFF_ALLMULTI; 1136 1.2 matt memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1137 1.2 matt memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1138 1.2 matt break; 1139 1.2 matt } 1140 1.2 matt if ((sc->sc_rctrl & RCTRL_EMEN) 1141 1.2 matt && i < __arraycount(sc->sc_macaddrs)) { 1142 1.2 matt sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1143 1.2 matt } else { 1144 1.2 matt uint32_t crc = ether_crc32_be(addr, ETHER_ADDR_LEN); 1145 1.2 matt #if 0 1146 1.2 matt printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1147 1.2 matt ether_sprintf(addr), crc, 1148 1.2 matt crc >> crc_shift, 1149 1.2 matt crc >> (crc_shift + 5), 1150 1.2 matt (crc >> crc_shift) & 31, 1151 1.2 matt 1 << (((crc >> crc_shift) & 31) ^ 31)); 1152 1.2 matt #endif 1153 1.2 matt /* 1154 1.2 matt * The documentation doesn't completely follow PowerPC 1155 1.2 matt * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1156 1.2 matt * is 0x7fa32d9b. By empirical testing, the 1157 1.2 matt * corresponding hash bit is word 3, bit 31 (ppc bit 1158 1.2 matt * order). Since 3 << 31 | 31 is 0x7f, we deduce 1159 1.2 matt * H[0:2] selects the register while H[3:7] selects 1160 1.2 matt * the bit (ppc bit order). 
			crc >>= crc_shift;
			gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31);
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	for (u_int i = 0; i < 8; i++) {
		etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]);
		etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]);
#if 0
		if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8])
			printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n",
			    __func__,
			    i, IGADDR(i), etsec_read(sc, IGADDR(i)),
			    i, GADDR(i), etsec_read(sc, GADDR(i)));
#endif
	}
	for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) {
		uint64_t macaddr = sc->sc_macaddrs[i];
		etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32));
		etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0));
#if 0
		if (macaddr)
			printf("%s: MAC%02uADDR1(%08x)=%#x MAC%02uADDR2(%#x)=%08x\n",
			    __func__,
			    i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)),
			    i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i)));
#endif
	}
}

static int
pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct pq3etsec_softc *sc = ifp->if_softc;
	struct ifreq * const ifr = data;
	const int s = splnet();
	int error;

	switch (cmd) {
	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;

		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			error = 0;
			if (ifp->if_flags & IFF_RUNNING)
				pq3etsec_mc_setup(sc);
			break;
		}
		error = pq3etsec_ifinit(ifp);
		break;
	}

	splx(s);
	return error;
}

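/*
 * Descriptor ring synchronization helpers.  The presync variants flush
 * CPU stores to a span of descriptors before ownership passes to the
 * ETSEC; the postsync variants are used before the CPU reads fields the
 * ETSEC may have written back.
 */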
static void
pq3etsec_rxq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_rxq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	volatile struct rxbd *rxbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap,
	    (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

static void
pq3etsec_txq_desc_presync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
pq3etsec_txq_desc_postsync(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	volatile struct txbd *txbd,
	size_t count)
{
	bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
	    (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
}

static bus_dmamap_t
pq3etsec_mapcache_get(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	KASSERT(dmc->dmc_nmaps > 0);
	KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL);
	return dmc->dmc_maps[--dmc->dmc_nmaps];
}

static void
pq3etsec_mapcache_put(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc,
	bus_dmamap_t map)
{
	KASSERT(map != NULL);
	KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps);
	dmc->dmc_maps[dmc->dmc_nmaps++] = map;
}

static void
pq3etsec_mapcache_destroy(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache *dmc)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]);

	for (u_int i = 0; i < dmc->dmc_maxmaps; i++) {
		bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]);
	}
	kmem_intr_free(dmc, dmc_size);
}

static int
pq3etsec_mapcache_create(
	struct pq3etsec_softc *sc,
	struct pq3etsec_mapcache **dmc_p,
	size_t maxmaps,
	size_t maxmapsize,
	size_t maxseg)
{
	const size_t dmc_size =
	    offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]);
	struct pq3etsec_mapcache * const dmc =
	    kmem_intr_zalloc(dmc_size, KM_NOSLEEP);

	dmc->dmc_maxmaps = maxmaps;
	dmc->dmc_nmaps = maxmaps;
	dmc->dmc_maxmapsize = maxmapsize;
	dmc->dmc_maxseg = maxseg;

	for (u_int i = 0; i < maxmaps; i++) {
		int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize,
		    dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create dma map cache "
			    "entry %u of %zu: %d\n",
			    i, maxmaps, error);
			while (i-- > 0) {
				bus_dmamap_destroy(sc->sc_dmat,
				    dmc->dmc_maps[i]);
			}
			kmem_intr_free(dmc, dmc_size);
			return error;
		}
		KASSERT(dmc->dmc_maps[i] != NULL);
	}

	*dmc_p = dmc;

	return 0;
}

#if 0
static void
pq3etsec_dmamem_free(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t map,
	void *kvap)
{
	bus_dmamap_destroy(dmat, map);
	bus_dmamem_unmap(dmat, kvap, map_size);
	bus_dmamem_free(dmat, seg, 1);
}
#endif

static int
pq3etsec_dmamem_alloc(
	bus_dma_tag_t dmat,
	size_t map_size,
	bus_dma_segment_t *seg,
	bus_dmamap_t *map,
	void **kvap)
{
	int error;
	int nseg;

	*kvap = NULL;
	*map = NULL;

	error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0,
	    seg, 1, &nseg, 0);
	if (error)
		return error;

	KASSERT(nseg == 1);

	error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap,
	    BUS_DMA_COHERENT);
	if (error == 0) {
		error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0,
		    map);
		if (error == 0) {
			error = bus_dmamap_load(dmat, *map, *kvap, map_size,
			    NULL, 0);
			if (error == 0)
				return 0;
			bus_dmamap_destroy(dmat, *map);
			*map = NULL;
		}
		bus_dmamem_unmap(dmat, *kvap, map_size);
		*kvap = NULL;
	}
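	/*
	 * Everything after bus_dmamem_alloc() failed: give the segment
	 * back and return the error from the step that failed.
	 */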
	bus_dmamem_free(dmat, seg, nseg);
	return error;
}

static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load rx dmamap: %d\n",
		    error);
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

static bool
pq3etsec_rxq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	volatile struct rxbd *producer = rxq->rxq_producer;
#if 0
	size_t inuse = rxq->rxq_inuse;
#endif
	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = pq3etsec_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
#endif

		/* rxbd_len is write-only by the ETSEC */
		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
		membar_producer();
		producer->rxbd_flags |= RXBD_E;
		if
(__predict_false(rxq->rxq_mhead == NULL)) { 1507 1.2 matt KASSERT(producer == rxq->rxq_consumer); 1508 1.2 matt rxq->rxq_mconsumer = m; 1509 1.2 matt } 1510 1.2 matt *rxq->rxq_mtail = m; 1511 1.2 matt rxq->rxq_mtail = &m->m_next; 1512 1.2 matt m->m_len = MCLBYTES; 1513 1.2 matt m->m_next = NULL; 1514 1.2 matt rxq->rxq_inuse++; 1515 1.2 matt if (++producer == rxq->rxq_last) { 1516 1.2 matt membar_producer(); 1517 1.2 matt pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1518 1.2 matt rxq->rxq_last - rxq->rxq_producer); 1519 1.2 matt producer = rxq->rxq_producer = rxq->rxq_first; 1520 1.2 matt } 1521 1.2 matt } 1522 1.2 matt if (producer != rxq->rxq_producer) { 1523 1.2 matt membar_producer(); 1524 1.2 matt pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer, 1525 1.2 matt producer - rxq->rxq_producer); 1526 1.2 matt rxq->rxq_producer = producer; 1527 1.2 matt } 1528 1.2 matt uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT; 1529 1.2 matt if (qhlt) { 1530 1.2 matt KASSERT(qhlt & rxq->rxq_qmask); 1531 1.2 matt sc->sc_ev_rx_stall.ev_count++; 1532 1.2 matt etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask); 1533 1.2 matt } 1534 1.2 matt #if 0 1535 1.2 matt aprint_normal_dev(sc->sc_dev, 1536 1.2 matt "%s: buffers inuse went from %zu to %zu\n", 1537 1.2 matt __func__, inuse, rxq->rxq_inuse); 1538 1.2 matt #endif 1539 1.2 matt return true; 1540 1.2 matt } 1541 1.2 matt 1542 1.2 matt static bool 1543 1.2 matt pq3etsec_rx_offload( 1544 1.2 matt struct pq3etsec_softc *sc, 1545 1.2 matt struct mbuf *m, 1546 1.2 matt const struct rxfcb *fcb) 1547 1.2 matt { 1548 1.2 matt if (fcb->rxfcb_flags & RXFCB_VLN) { 1549 1.30 knakahar vlan_set_tag(m, fcb->rxfcb_vlctl); 1550 1.2 matt } 1551 1.2 matt if ((fcb->rxfcb_flags & RXFCB_IP) == 0 1552 1.44 msaitoh || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0) 1553 1.2 matt return true; 1554 1.2 matt int csum_flags = 0; 1555 1.44 msaitoh if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) { 1556 1.2 matt csum_flags |= M_CSUM_IPv4; 1557 1.2 matt if (fcb->rxfcb_flags & RXFCB_EIP) 1558 1.2 matt csum_flags |= M_CSUM_IPv4_BAD; 1559 1.2 matt } 1560 1.2 matt if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) { 1561 1.2 matt int ipv_flags; 1562 1.2 matt if (fcb->rxfcb_flags & RXFCB_IP6) 1563 1.44 msaitoh ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6; 1564 1.2 matt else 1565 1.44 msaitoh ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4; 1566 1.2 matt if (fcb->rxfcb_pro == IPPROTO_TCP) { 1567 1.44 msaitoh csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags; 1568 1.2 matt } else { 1569 1.44 msaitoh csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags; 1570 1.2 matt } 1571 1.2 matt if (fcb->rxfcb_flags & RXFCB_ETU) 1572 1.2 matt csum_flags |= M_CSUM_TCP_UDP_BAD; 1573 1.2 matt } 1574 1.2 matt 1575 1.2 matt m->m_pkthdr.csum_flags = csum_flags; 1576 1.2 matt return true; 1577 1.2 matt } 1578 1.2 matt 1579 1.2 matt static void 1580 1.2 matt pq3etsec_rx_input( 1581 1.2 matt struct pq3etsec_softc *sc, 1582 1.2 matt struct mbuf *m, 1583 1.2 matt uint16_t rxbd_flags) 1584 1.2 matt { 1585 1.2 matt struct ifnet * const ifp = &sc->sc_if; 1586 1.2 matt 1587 1.2 matt pq3etsec_rx_map_unload(sc, m); 1588 1.2 matt 1589 1.2 matt if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) { 1590 1.2 matt struct rxfcb fcb = *mtod(m, struct rxfcb *); 1591 1.2 matt if (!pq3etsec_rx_offload(sc, m, &fcb)) 1592 1.2 matt return; 1593 1.2 matt } 1594 1.2 matt m_adj(m, sc->sc_rx_adjlen); 1595 1.2 matt 1596 1.2 matt if (rxbd_flags & RXBD_M) 1597 1.2 matt m->m_flags |= M_PROMISC; 1598 1.2 matt if 
	(rxbd_flags & RXBD_BC)
		m->m_flags |= M_BCAST;
	if (rxbd_flags & RXBD_MC)
		m->m_flags |= M_MCAST;
	m->m_flags |= M_HASFCS;
	m_set_rcvif(m, &sc->sc_if);

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}

static void
pq3etsec_rxq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct rxbd *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;

	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);

	for (;;) {
		if (consumer == rxq->rxq_producer) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}
		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
		const uint16_t rxbd_flags = consumer->rxbd_flags;
		if (rxbd_flags & RXBD_E) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			break;
		}
		KASSERT(rxq->rxq_mconsumer != NULL);
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
#endif
#if 0
		printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
		    __func__,
		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
		    mtod(rxq->rxq_mconsumer, int *)[0],
		    mtod(rxq->rxq_mconsumer, int *)[1],
		    mtod(rxq->rxq_mconsumer, int *)[2],
		    mtod(rxq->rxq_mconsumer, int *)[3]);
#endif
		/*
		 * We own this packet again.  Clear all flags except wrap
		 * and interrupt.
		 */
		rxconsumed++;
		consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I);

		/*
		 * If this descriptor has the LAST bit set and no errors,
		 * it's a valid input packet.
		 */
		if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) {
			size_t rxbd_len = consumer->rxbd_len;
			struct mbuf *m = rxq->rxq_mhead;
			struct mbuf *m_last = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = rxq->rxq_mhead;
			m_last->m_next = NULL;
			m_last->m_len = rxbd_len & (MCLBYTES - 1);
			m->m_pkthdr.len = rxbd_len;
			pq3etsec_rx_input(sc, m, rxbd_flags);
		} else if (rxbd_flags & RXBD_L) {
			KASSERT(rxbd_flags & RXBD_ERRORS);
			struct mbuf *m;
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
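			 * The dmamaps stay loaded, so pq3etsec_rxq_produce()
			 * can repost these buffers without another
			 * bus_dmamap_load_mbuf().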
1676 1.2 matt */ 1677 1.48 thorpej if_statinc(ifp, if_ierrors); 1678 1.2 matt for (m = rxq->rxq_mhead; 1679 1.2 matt m != rxq->rxq_mconsumer; 1680 1.2 matt m = m->m_next) { 1681 1.2 matt IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1682 1.2 matt } 1683 1.2 matt m = rxq->rxq_mconsumer; 1684 1.2 matt if ((rxq->rxq_mhead = m->m_next) == NULL) 1685 1.2 matt rxq->rxq_mtail = &rxq->rxq_mhead; 1686 1.2 matt rxq->rxq_mconsumer = m->m_next; 1687 1.2 matt IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1688 1.2 matt } else { 1689 1.2 matt rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next; 1690 1.2 matt } 1691 1.2 matt #ifdef ETSEC_DEBUG 1692 1.2 matt rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL; 1693 1.2 matt #endif 1694 1.2 matt 1695 1.2 matt /* 1696 1.2 matt * Wrap at the last entry! 1697 1.2 matt */ 1698 1.2 matt if (rxbd_flags & RXBD_W) { 1699 1.2 matt KASSERT(consumer + 1 == rxq->rxq_last); 1700 1.2 matt consumer = rxq->rxq_first; 1701 1.2 matt } else { 1702 1.2 matt consumer++; 1703 1.2 matt } 1704 1.2 matt #ifdef ETSEC_DEBUG 1705 1.2 matt KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer); 1706 1.2 matt #endif 1707 1.2 matt } 1708 1.52 rin 1709 1.52 rin if (rxconsumed != 0) 1710 1.52 rin rnd_add_uint32(&sc->rnd_source, rxconsumed); 1711 1.2 matt } 1712 1.2 matt 1713 1.2 matt static void 1714 1.2 matt pq3etsec_rxq_purge( 1715 1.2 matt struct pq3etsec_softc *sc, 1716 1.2 matt struct pq3etsec_rxqueue *rxq, 1717 1.2 matt bool discard) 1718 1.2 matt { 1719 1.2 matt struct mbuf *m; 1720 1.2 matt 1721 1.2 matt if ((m = rxq->rxq_mhead) != NULL) { 1722 1.2 matt #ifdef ETSEC_DEBUG 1723 1.2 matt memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs)); 1724 1.2 matt #endif 1725 1.2 matt 1726 1.2 matt if (discard) { 1727 1.2 matt pq3etsec_rx_map_unload(sc, m); 1728 1.2 matt m_freem(m); 1729 1.2 matt } else { 1730 1.2 matt while (m != NULL) { 1731 1.2 matt struct mbuf *m0 = m->m_next; 1732 1.2 matt m->m_next = NULL; 1733 1.2 matt IF_ENQUEUE(&sc->sc_rx_bufcache, m); 1734 1.2 matt m = m0; 1735 1.2 matt } 1736 1.2 matt } 1737 1.2 matt } 1738 1.2 matt 1739 1.2 matt rxq->rxq_mconsumer = NULL; 1740 1.2 matt rxq->rxq_mhead = NULL; 1741 1.2 matt rxq->rxq_mtail = &rxq->rxq_mhead; 1742 1.2 matt rxq->rxq_inuse = 0; 1743 1.2 matt } 1744 1.2 matt 1745 1.2 matt static void 1746 1.2 matt pq3etsec_rxq_reset( 1747 1.2 matt struct pq3etsec_softc *sc, 1748 1.2 matt struct pq3etsec_rxqueue *rxq) 1749 1.2 matt { 1750 1.2 matt /* 1751 1.2 matt * sync all the descriptors 1752 1.2 matt */ 1753 1.2 matt pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first, 1754 1.2 matt rxq->rxq_last - rxq->rxq_first); 1755 1.2 matt 1756 1.2 matt /* 1757 1.2 matt * Make sure we own all descriptors in the ring. 1758 1.2 matt */ 1759 1.2 matt volatile struct rxbd *rxbd; 1760 1.2 matt for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) { 1761 1.2 matt rxbd->rxbd_flags = RXBD_I; 1762 1.2 matt } 1763 1.2 matt 1764 1.2 matt /* 1765 1.2 matt * Last descriptor has the wrap flag. 1766 1.2 matt */ 1767 1.44 msaitoh rxbd->rxbd_flags = RXBD_W | RXBD_I; 1768 1.2 matt 1769 1.2 matt /* 1770 1.2 matt * Reset the producer consumer indexes. 
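	 * Both go back to rxq_first, and rxq_inuse returns to zero until
	 * pq3etsec_rxq_produce() posts fresh buffers.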
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
		rxq->rxq_threshold = ETSEC_MINRXMBUFS;

	sc->sc_imask |= IEVENT_RXF | IEVENT_BSY;

	/*
	 * Restart the receive at the first descriptor
	 */
	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
}

static int
pq3etsec_rxq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct rxbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	pq3etsec_rxq_purge(sc, rxq, true);
	pq3etsec_rxq_reset(sc, rxq);

	rxq->rxq_reg_rbase = RBASEn(qno);
	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);

	return 0;
}

static bool
pq3etsec_txq_active_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return !IF_IS_EMPTY(&txq->txq_mbufs);
}

static bool
pq3etsec_txq_fillable_p(
	struct pq3etsec_softc * const sc,
	struct pq3etsec_txqueue *txq)
{
	return txq->txq_free >= txq->txq_threshold;
}

static int
pq3etsec_txq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct txbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;

	IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS);

	txq->txq_reg_tbase = TBASEn(qno);
	txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno);

	pq3etsec_txq_reset(sc, txq);

	return 0;
}

static int
pq3etsec_txq_map_load(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf *m)
{
	bus_dmamap_t map;
	int error;

	map
= M_GETCTX(m, bus_dmamap_t); 1875 1.2 matt if (map != NULL) 1876 1.2 matt return 0; 1877 1.2 matt 1878 1.2 matt map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1879 1.2 matt if (map == NULL) 1880 1.2 matt return ENOMEM; 1881 1.2 matt 1882 1.2 matt error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1883 1.2 matt BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1884 1.2 matt if (error) 1885 1.2 matt return error; 1886 1.2 matt 1887 1.2 matt bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1888 1.2 matt BUS_DMASYNC_PREWRITE); 1889 1.2 matt M_SETCTX(m, map); 1890 1.2 matt return 0; 1891 1.2 matt } 1892 1.2 matt 1893 1.2 matt static void 1894 1.2 matt pq3etsec_txq_map_unload( 1895 1.2 matt struct pq3etsec_softc *sc, 1896 1.2 matt struct pq3etsec_txqueue *txq, 1897 1.2 matt struct mbuf *m) 1898 1.2 matt { 1899 1.2 matt KASSERT(m); 1900 1.2 matt bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1901 1.2 matt bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1902 1.2 matt BUS_DMASYNC_POSTWRITE); 1903 1.2 matt bus_dmamap_unload(sc->sc_dmat, map); 1904 1.2 matt pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1905 1.2 matt } 1906 1.2 matt 1907 1.2 matt static bool 1908 1.2 matt pq3etsec_txq_produce( 1909 1.2 matt struct pq3etsec_softc *sc, 1910 1.2 matt struct pq3etsec_txqueue *txq, 1911 1.2 matt struct mbuf *m) 1912 1.2 matt { 1913 1.2 matt bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1914 1.2 matt 1915 1.2 matt if (map->dm_nsegs > txq->txq_free) 1916 1.2 matt return false; 1917 1.2 matt 1918 1.2 matt /* 1919 1.2 matt * TCP Offload flag must be set in the first descriptor. 1920 1.2 matt */ 1921 1.2 matt volatile struct txbd *producer = txq->txq_producer; 1922 1.2 matt uint16_t last_flags = TXBD_L; 1923 1.2 matt uint16_t first_flags = TXBD_R 1924 1.2 matt | ((m->m_flags & M_HASFCB) ? TXBD_TOE : 0); 1925 1.2 matt 1926 1.2 matt /* 1927 1.2 matt * If we've produced enough descriptors without consuming any 1928 1.2 matt * we need to ask for an interrupt to reclaim some. 
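	 * (With TX interrupt coalescing enabled, TXBD_I is requested on
	 * every frame instead and the hardware merges the resulting
	 * interrupts.)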
1929 1.2 matt */ 1930 1.2 matt txq->txq_lastintr += map->dm_nsegs; 1931 1.25 nonaka if (ETSEC_IC_TX_ENABLED(sc) 1932 1.25 nonaka || txq->txq_lastintr >= txq->txq_threshold 1933 1.2 matt || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1934 1.2 matt txq->txq_lastintr = 0; 1935 1.2 matt last_flags |= TXBD_I; 1936 1.2 matt } 1937 1.2 matt 1938 1.2 matt #ifdef ETSEC_DEBUG 1939 1.2 matt KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1940 1.2 matt #endif 1941 1.2 matt KASSERT(producer != txq->txq_last); 1942 1.2 matt producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1943 1.2 matt producer->txbd_len = map->dm_segs[0].ds_len; 1944 1.2 matt 1945 1.2 matt if (map->dm_nsegs > 1) { 1946 1.2 matt volatile struct txbd *start = producer + 1; 1947 1.2 matt size_t count = map->dm_nsegs - 1; 1948 1.2 matt for (u_int i = 1; i < map->dm_nsegs; i++) { 1949 1.2 matt if (__predict_false(++producer == txq->txq_last)) { 1950 1.2 matt producer = txq->txq_first; 1951 1.2 matt if (start < txq->txq_last) { 1952 1.2 matt pq3etsec_txq_desc_presync(sc, txq, 1953 1.2 matt start, txq->txq_last - start); 1954 1.2 matt count -= txq->txq_last - start; 1955 1.2 matt } 1956 1.2 matt start = txq->txq_first; 1957 1.2 matt } 1958 1.2 matt #ifdef ETSEC_DEBUG 1959 1.2 matt KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1960 1.2 matt #endif 1961 1.2 matt producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1962 1.2 matt producer->txbd_len = map->dm_segs[i].ds_len; 1963 1.2 matt producer->txbd_flags = TXBD_R 1964 1.2 matt | (producer->txbd_flags & TXBD_W) 1965 1.2 matt | (i == map->dm_nsegs - 1 ? last_flags : 0); 1966 1.2 matt #if 0 1967 1.2 matt printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1968 1.2 matt producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1969 1.2 matt #endif 1970 1.2 matt } 1971 1.2 matt pq3etsec_txq_desc_presync(sc, txq, start, count); 1972 1.2 matt } else { 1973 1.2 matt first_flags |= last_flags; 1974 1.2 matt } 1975 1.2 matt 1976 1.2 matt membar_producer(); 1977 1.2 matt txq->txq_producer->txbd_flags = 1978 1.2 matt first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1979 1.2 matt #if 0 1980 1.2 matt printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1981 1.2 matt txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1982 1.2 matt txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1983 1.2 matt #endif 1984 1.2 matt pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1985 1.2 matt 1986 1.2 matt /* 1987 1.2 matt * Reduce free count by the number of segments we consumed. 
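	 * (dm_nsegs was checked against txq_free on entry, so this
	 * cannot underflow.)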
	 */
	txq->txq_free -= map->dm_nsegs;
	KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
	KASSERT(map->dm_nsegs == 1 || (txq->txq_producer->txbd_flags & TXBD_L) == 0);
	KASSERT(producer->txbd_flags & TXBD_L);
#ifdef ETSEC_DEBUG
	txq->txq_lmbufs[producer - txq->txq_first] = m;
#endif

#if 0
	printf("%s: mbuf %p: produced a %u byte packet in %u segments (%u..%u)\n",
	    __func__, m, m->m_pkthdr.len, map->dm_nsegs,
	    txq->txq_producer - txq->txq_first, producer - txq->txq_first);
#endif

	if (++producer == txq->txq_last)
		txq->txq_producer = txq->txq_first;
	else
		txq->txq_producer = producer;
	IF_ENQUEUE(&txq->txq_mbufs, m);

	/*
	 * Restart the transmitter.
	 */
	etsec_write(sc, TSTAT, txq->txq_qmask & TSTAT_THLT);	/* W1C */

	return true;
}

static void
pq3etsec_tx_offload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf **mp)
{
	struct mbuf *m = *mp;
	u_int csum_flags = m->m_pkthdr.csum_flags;
	bool have_vtag;
	uint16_t vtag;

	KASSERT(m->m_flags & M_PKTHDR);

	have_vtag = vlan_has_tag(m);
	vtag = (have_vtag) ? vlan_get_tag(m) : 0;

	/*
	 * Let's see if we are doing any offload first.
	 */
	if (csum_flags == 0 && !have_vtag) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	uint16_t flags = 0;
	if (csum_flags & M_CSUM_IP) {
		flags |= TXFCB_IP
		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
		    | ((csum_flags & M_CSUM_CTU) ?
TXFCB_CTU : 0); 2049 1.2 matt } 2050 1.30 knakahar if (have_vtag) { 2051 1.2 matt flags |= TXFCB_VLN; 2052 1.2 matt } 2053 1.2 matt if (flags == 0) { 2054 1.2 matt m->m_flags &= ~M_HASFCB; 2055 1.2 matt return; 2056 1.2 matt } 2057 1.2 matt 2058 1.2 matt struct txfcb fcb; 2059 1.2 matt fcb.txfcb_flags = flags; 2060 1.2 matt if (csum_flags & M_CSUM_IPv4) 2061 1.2 matt fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data); 2062 1.2 matt else 2063 1.33 maxv fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data); 2064 1.2 matt fcb.txfcb_l3os = ETHER_HDR_LEN; 2065 1.2 matt fcb.txfcb_phcs = 0; 2066 1.31 knakahar fcb.txfcb_vlctl = vtag; 2067 1.2 matt 2068 1.2 matt #if 0 2069 1.2 matt printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n", 2070 1.2 matt __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os, 2071 1.2 matt fcb.txfcb_phcs, fcb.txfcb_vlctl); 2072 1.2 matt #endif 2073 1.2 matt 2074 1.2 matt if (M_LEADINGSPACE(m) >= sizeof(fcb)) { 2075 1.2 matt m->m_data -= sizeof(fcb); 2076 1.2 matt m->m_len += sizeof(fcb); 2077 1.2 matt } else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) { 2078 1.2 matt memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len); 2079 1.2 matt m->m_data = m->m_pktdat; 2080 1.2 matt m->m_len += sizeof(fcb); 2081 1.2 matt } else { 2082 1.2 matt struct mbuf *mn; 2083 1.2 matt MGET(mn, M_DONTWAIT, m->m_type); 2084 1.2 matt if (mn == NULL) { 2085 1.2 matt if (csum_flags & M_CSUM_IP4) { 2086 1.2 matt #ifdef INET 2087 1.35 maxv in_undefer_cksum(m, ETHER_HDR_LEN, 2088 1.2 matt csum_flags & M_CSUM_IP4); 2089 1.2 matt #else 2090 1.2 matt panic("%s: impossible M_CSUM flags %#x", 2091 1.2 matt device_xname(sc->sc_dev), csum_flags); 2092 1.2 matt #endif 2093 1.2 matt } else if (csum_flags & M_CSUM_IP6) { 2094 1.2 matt #ifdef INET6 2095 1.36 maxv in6_undefer_cksum(m, ETHER_HDR_LEN, 2096 1.2 matt csum_flags & M_CSUM_IP6); 2097 1.2 matt #else 2098 1.2 matt panic("%s: impossible M_CSUM flags %#x", 2099 1.2 matt device_xname(sc->sc_dev), csum_flags); 2100 1.2 matt #endif 2101 1.2 matt } 2102 1.2 matt 2103 1.2 matt m->m_flags &= ~M_HASFCB; 2104 1.2 matt return; 2105 1.2 matt } 2106 1.2 matt 2107 1.38 maxv m_move_pkthdr(mn, m); 2108 1.2 matt mn->m_next = m; 2109 1.2 matt m = mn; 2110 1.39 maxv m_align(m, sizeof(fcb)); 2111 1.2 matt m->m_len = sizeof(fcb); 2112 1.2 matt *mp = m; 2113 1.2 matt } 2114 1.2 matt m->m_pkthdr.len += sizeof(fcb); 2115 1.2 matt m->m_flags |= M_HASFCB; 2116 1.2 matt *mtod(m, struct txfcb *) = fcb; 2117 1.2 matt return; 2118 1.2 matt } 2119 1.2 matt 2120 1.2 matt static bool 2121 1.2 matt pq3etsec_txq_enqueue( 2122 1.2 matt struct pq3etsec_softc *sc, 2123 1.2 matt struct pq3etsec_txqueue *txq) 2124 1.2 matt { 2125 1.2 matt for (;;) { 2126 1.2 matt if (IF_QFULL(&txq->txq_mbufs)) 2127 1.2 matt return false; 2128 1.2 matt struct mbuf *m = txq->txq_next; 2129 1.2 matt if (m == NULL) { 2130 1.2 matt int s = splnet(); 2131 1.20 nonaka IFQ_DEQUEUE(&sc->sc_if.if_snd, m); 2132 1.2 matt splx(s); 2133 1.2 matt if (m == NULL) 2134 1.2 matt return true; 2135 1.2 matt M_SETCTX(m, NULL); 2136 1.2 matt pq3etsec_tx_offload(sc, txq, &m); 2137 1.2 matt } else { 2138 1.2 matt txq->txq_next = NULL; 2139 1.2 matt } 2140 1.2 matt int error = pq3etsec_txq_map_load(sc, txq, m); 2141 1.2 matt if (error) { 2142 1.2 matt aprint_error_dev(sc->sc_dev, 2143 1.2 matt "discarded packet due to " 2144 1.2 matt "dmamap load failure: %d\n", error); 2145 1.2 matt m_freem(m); 2146 1.2 matt continue; 2147 1.2 matt } 2148 1.2 matt 
KASSERT(txq->txq_next == NULL); 2149 1.2 matt if (!pq3etsec_txq_produce(sc, txq, m)) { 2150 1.2 matt txq->txq_next = m; 2151 1.2 matt return false; 2152 1.2 matt } 2153 1.2 matt KASSERT(txq->txq_next == NULL); 2154 1.2 matt } 2155 1.2 matt } 2156 1.2 matt 2157 1.2 matt static bool 2158 1.2 matt pq3etsec_txq_consume( 2159 1.2 matt struct pq3etsec_softc *sc, 2160 1.2 matt struct pq3etsec_txqueue *txq) 2161 1.2 matt { 2162 1.2 matt struct ifnet * const ifp = &sc->sc_if; 2163 1.2 matt volatile struct txbd *consumer = txq->txq_consumer; 2164 1.2 matt size_t txfree = 0; 2165 1.53 rin bool ret; 2166 1.2 matt 2167 1.2 matt #if 0 2168 1.2 matt printf("%s: entry: free=%zu\n", __func__, txq->txq_free); 2169 1.2 matt #endif 2170 1.2 matt etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask); 2171 1.2 matt 2172 1.2 matt for (;;) { 2173 1.2 matt if (consumer == txq->txq_producer) { 2174 1.2 matt txq->txq_consumer = consumer; 2175 1.2 matt txq->txq_free += txfree; 2176 1.37 riastrad txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 2177 1.2 matt KASSERT(txq->txq_lastintr == 0); 2178 1.53 rin KASSERT(txq->txq_free == 2179 1.53 rin txq->txq_last - txq->txq_first - 1); 2180 1.53 rin ret = true; 2181 1.53 rin break; 2182 1.2 matt } 2183 1.2 matt pq3etsec_txq_desc_postsync(sc, txq, consumer, 1); 2184 1.2 matt const uint16_t txbd_flags = consumer->txbd_flags; 2185 1.2 matt if (txbd_flags & TXBD_R) { 2186 1.2 matt txq->txq_consumer = consumer; 2187 1.2 matt txq->txq_free += txfree; 2188 1.37 riastrad txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree); 2189 1.53 rin ret = pq3etsec_txq_fillable_p(sc, txq); 2190 1.53 rin break; 2191 1.2 matt } 2192 1.2 matt 2193 1.2 matt /* 2194 1.2 matt * If this is the last descriptor in the chain, get the 2195 1.2 matt * mbuf, free its dmamap, and free the mbuf chain itself. 2196 1.2 matt */ 2197 1.2 matt if (txbd_flags & TXBD_L) { 2198 1.2 matt struct mbuf *m; 2199 1.2 matt 2200 1.2 matt IF_DEQUEUE(&txq->txq_mbufs, m); 2201 1.2 matt #ifdef ETSEC_DEBUG 2202 1.8 jym KASSERTMSG( 2203 1.8 jym m == txq->txq_lmbufs[consumer-txq->txq_first], 2204 1.8 jym "%s: %p [%u]: flags %#x m (%p) != %p (%p)", 2205 1.8 jym __func__, consumer, consumer - txq->txq_first, 2206 1.8 jym txbd_flags, m, 2207 1.8 jym &txq->txq_lmbufs[consumer-txq->txq_first], 2208 1.8 jym txq->txq_lmbufs[consumer-txq->txq_first]); 2209 1.2 matt #endif 2210 1.2 matt KASSERT(m); 2211 1.2 matt pq3etsec_txq_map_unload(sc, txq, m); 2212 1.2 matt #if 0 2213 1.2 matt printf("%s: mbuf %p: consumed a %u byte packet\n", 2214 1.2 matt __func__, m, m->m_pkthdr.len); 2215 1.2 matt #endif 2216 1.2 matt if (m->m_flags & M_HASFCB) 2217 1.2 matt m_adj(m, sizeof(struct txfcb)); 2218 1.34 msaitoh bpf_mtap(ifp, m, BPF_D_OUT); 2219 1.49 martin net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 2220 1.60 riastrad if_statinc_ref(ifp, nsr, if_opackets); 2221 1.60 riastrad if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len); 2222 1.2 matt if (m->m_flags & M_MCAST) 2223 1.60 riastrad if_statinc_ref(ifp, nsr, if_omcasts); 2224 1.2 matt if (txbd_flags & TXBD_ERRORS) 2225 1.60 riastrad if_statinc_ref(ifp, nsr, if_oerrors); 2226 1.48 thorpej IF_STAT_PUTREF(ifp); 2227 1.2 matt m_freem(m); 2228 1.2 matt #ifdef ETSEC_DEBUG 2229 1.2 matt txq->txq_lmbufs[consumer - txq->txq_first] = NULL; 2230 1.2 matt #endif 2231 1.2 matt } else { 2232 1.2 matt #ifdef ETSEC_DEBUG 2233 1.2 matt KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL); 2234 1.2 matt #endif 2235 1.2 matt } 2236 1.2 matt 2237 1.2 matt /* 2238 1.2 matt * We own this packet again. 
Clear all flags except wrap. 2239 1.2 matt */ 2240 1.2 matt txfree++; 2241 1.2 matt //consumer->txbd_flags = txbd_flags & TXBD_W; 2242 1.2 matt 2243 1.2 matt /* 2244 1.2 matt * Wrap at the last entry! 2245 1.2 matt */ 2246 1.2 matt if (txbd_flags & TXBD_W) { 2247 1.2 matt KASSERT(consumer + 1 == txq->txq_last); 2248 1.2 matt consumer = txq->txq_first; 2249 1.2 matt } else { 2250 1.2 matt consumer++; 2251 1.2 matt KASSERT(consumer < txq->txq_last); 2252 1.2 matt } 2253 1.2 matt } 2254 1.52 rin 2255 1.52 rin if (txfree != 0) 2256 1.52 rin rnd_add_uint32(&sc->rnd_source, txfree); 2257 1.53 rin return ret; 2258 1.2 matt } 2259 1.2 matt 2260 1.2 matt static void 2261 1.2 matt pq3etsec_txq_purge( 2262 1.2 matt struct pq3etsec_softc *sc, 2263 1.2 matt struct pq3etsec_txqueue *txq) 2264 1.2 matt { 2265 1.2 matt struct mbuf *m; 2266 1.2 matt KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0); 2267 1.2 matt 2268 1.2 matt for (;;) { 2269 1.2 matt IF_DEQUEUE(&txq->txq_mbufs, m); 2270 1.2 matt if (m == NULL) 2271 1.2 matt break; 2272 1.2 matt pq3etsec_txq_map_unload(sc, txq, m); 2273 1.2 matt m_freem(m); 2274 1.2 matt } 2275 1.2 matt if ((m = txq->txq_next) != NULL) { 2276 1.2 matt txq->txq_next = NULL; 2277 1.2 matt pq3etsec_txq_map_unload(sc, txq, m); 2278 1.2 matt m_freem(m); 2279 1.2 matt } 2280 1.2 matt #ifdef ETSEC_DEBUG 2281 1.2 matt memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs)); 2282 1.2 matt #endif 2283 1.2 matt } 2284 1.2 matt 2285 1.2 matt static void 2286 1.2 matt pq3etsec_txq_reset( 2287 1.2 matt struct pq3etsec_softc *sc, 2288 1.2 matt struct pq3etsec_txqueue *txq) 2289 1.2 matt { 2290 1.2 matt /* 2291 1.2 matt * sync all the descriptors 2292 1.2 matt */ 2293 1.2 matt pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first, 2294 1.2 matt txq->txq_last - txq->txq_first); 2295 1.2 matt 2296 1.2 matt /* 2297 1.2 matt * Make sure we own all descriptors in the ring. 2298 1.2 matt */ 2299 1.2 matt volatile struct txbd *txbd; 2300 1.2 matt for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) { 2301 1.2 matt txbd->txbd_flags = 0; 2302 1.2 matt } 2303 1.2 matt 2304 1.2 matt /* 2305 1.2 matt * Last descriptor has the wrap flag. 2306 1.2 matt */ 2307 1.2 matt txbd->txbd_flags = TXBD_W; 2308 1.2 matt 2309 1.2 matt /* 2310 1.2 matt * Reset the producer consumer indexes. 2311 1.2 matt */ 2312 1.2 matt txq->txq_consumer = txq->txq_first; 2313 1.2 matt txq->txq_producer = txq->txq_first; 2314 1.2 matt txq->txq_free = txq->txq_last - txq->txq_first - 1; 2315 1.2 matt txq->txq_threshold = txq->txq_free / 2; 2316 1.2 matt txq->txq_lastintr = 0; 2317 1.2 matt 2318 1.2 matt /* 2319 1.2 matt * What do we want to get interrupted on? 
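	 * TXF for completed frames, and TXE so transmit errors reach
	 * pq3etsec_tx_error() promptly.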
2320 1.2 matt */ 2321 1.44 msaitoh sc->sc_imask |= IEVENT_TXF | IEVENT_TXE; 2322 1.2 matt 2323 1.2 matt /* 2324 1.2 matt * Restart the transmit at the first descriptor 2325 1.2 matt */ 2326 1.2 matt etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2327 1.2 matt } 2328 1.2 matt 2329 1.2 matt static void 2330 1.2 matt pq3etsec_ifstart(struct ifnet *ifp) 2331 1.2 matt { 2332 1.2 matt struct pq3etsec_softc * const sc = ifp->if_softc; 2333 1.2 matt 2334 1.58 thorpej if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 2335 1.23 nonaka return; 2336 1.23 nonaka } 2337 1.23 nonaka 2338 1.2 matt atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2339 1.2 matt softint_schedule(sc->sc_soft_ih); 2340 1.2 matt } 2341 1.2 matt 2342 1.2 matt static void 2343 1.2 matt pq3etsec_tx_error( 2344 1.2 matt struct pq3etsec_softc * const sc) 2345 1.2 matt { 2346 1.2 matt struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2347 1.2 matt 2348 1.2 matt pq3etsec_txq_consume(sc, txq); 2349 1.2 matt 2350 1.44 msaitoh if (sc->sc_txerrors 2351 1.44 msaitoh & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) { 2352 1.2 matt } else if (sc->sc_txerrors & IEVENT_EBERR) { 2353 1.2 matt } 2354 1.2 matt 2355 1.2 matt if (pq3etsec_txq_active_p(sc, txq)) 2356 1.2 matt etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2357 1.2 matt if (!pq3etsec_txq_enqueue(sc, txq)) { 2358 1.2 matt sc->sc_ev_tx_stall.ev_count++; 2359 1.2 matt } 2360 1.2 matt 2361 1.2 matt sc->sc_txerrors = 0; 2362 1.2 matt } 2363 1.2 matt 2364 1.2 matt int 2365 1.2 matt pq3etsec_tx_intr(void *arg) 2366 1.2 matt { 2367 1.2 matt struct pq3etsec_softc * const sc = arg; 2368 1.2 matt 2369 1.23 nonaka mutex_enter(sc->sc_hwlock); 2370 1.23 nonaka 2371 1.2 matt sc->sc_ev_tx_intr.ev_count++; 2372 1.2 matt 2373 1.2 matt uint32_t ievent = etsec_read(sc, IEVENT); 2374 1.44 msaitoh ievent &= IEVENT_TXF | IEVENT_TXB; 2375 1.2 matt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2376 1.2 matt 2377 1.2 matt #if 0 2378 1.43 msaitoh aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2379 1.2 matt __func__, ievent, etsec_read(sc, IMASK)); 2380 1.2 matt #endif 2381 1.2 matt 2382 1.23 nonaka if (ievent == 0) { 2383 1.23 nonaka mutex_exit(sc->sc_hwlock); 2384 1.2 matt return 0; 2385 1.23 nonaka } 2386 1.2 matt 2387 1.44 msaitoh sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB); 2388 1.2 matt atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2389 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2390 1.2 matt softint_schedule(sc->sc_soft_ih); 2391 1.23 nonaka 2392 1.23 nonaka mutex_exit(sc->sc_hwlock); 2393 1.23 nonaka 2394 1.2 matt return 1; 2395 1.2 matt } 2396 1.2 matt 2397 1.2 matt int 2398 1.2 matt pq3etsec_rx_intr(void *arg) 2399 1.2 matt { 2400 1.2 matt struct pq3etsec_softc * const sc = arg; 2401 1.2 matt 2402 1.23 nonaka mutex_enter(sc->sc_hwlock); 2403 1.23 nonaka 2404 1.2 matt sc->sc_ev_rx_intr.ev_count++; 2405 1.2 matt 2406 1.2 matt uint32_t ievent = etsec_read(sc, IEVENT); 2407 1.44 msaitoh ievent &= IEVENT_RXF | IEVENT_RXB; 2408 1.2 matt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2409 1.23 nonaka if (ievent == 0) { 2410 1.23 nonaka mutex_exit(sc->sc_hwlock); 2411 1.2 matt return 0; 2412 1.23 nonaka } 2413 1.2 matt 2414 1.2 matt #if 0 2415 1.2 matt aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2416 1.2 matt #endif 2417 1.2 matt 2418 1.44 msaitoh sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB); 2419 1.2 matt atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2420 1.2 matt etsec_write(sc, IMASK, 
sc->sc_imask); 2421 1.2 matt softint_schedule(sc->sc_soft_ih); 2422 1.23 nonaka 2423 1.23 nonaka mutex_exit(sc->sc_hwlock); 2424 1.23 nonaka 2425 1.2 matt return 1; 2426 1.2 matt } 2427 1.2 matt 2428 1.2 matt int 2429 1.2 matt pq3etsec_error_intr(void *arg) 2430 1.2 matt { 2431 1.2 matt struct pq3etsec_softc * const sc = arg; 2432 1.2 matt 2433 1.23 nonaka mutex_enter(sc->sc_hwlock); 2434 1.23 nonaka 2435 1.2 matt sc->sc_ev_error_intr.ev_count++; 2436 1.2 matt 2437 1.2 matt for (int rv = 0, soft_flags = 0;; rv = 1) { 2438 1.2 matt uint32_t ievent = etsec_read(sc, IEVENT); 2439 1.44 msaitoh ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB); 2440 1.2 matt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2441 1.2 matt if (ievent == 0) { 2442 1.2 matt if (soft_flags) { 2443 1.2 matt atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2444 1.2 matt softint_schedule(sc->sc_soft_ih); 2445 1.2 matt } 2446 1.23 nonaka mutex_exit(sc->sc_hwlock); 2447 1.2 matt return rv; 2448 1.2 matt } 2449 1.2 matt #if 0 2450 1.43 msaitoh aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2451 1.2 matt __func__, ievent, etsec_read(sc, IMASK)); 2452 1.2 matt #endif 2453 1.2 matt 2454 1.44 msaitoh if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) { 2455 1.44 msaitoh sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC); 2456 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2457 1.2 matt wakeup(sc); 2458 1.2 matt } 2459 1.44 msaitoh if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) { 2460 1.44 msaitoh sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR); 2461 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2462 1.2 matt wakeup(&sc->sc_mii); 2463 1.2 matt } 2464 1.2 matt if (ievent & IEVENT_BSY) { 2465 1.2 matt soft_flags |= SOFT_RXBSY; 2466 1.2 matt sc->sc_imask &= ~IEVENT_BSY; 2467 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2468 1.2 matt } 2469 1.2 matt if (ievent & IEVENT_TXE) { 2470 1.2 matt soft_flags |= SOFT_TXERROR; 2471 1.2 matt sc->sc_imask &= ~IEVENT_TXE; 2472 1.2 matt sc->sc_txerrors |= ievent; 2473 1.2 matt } 2474 1.2 matt if (ievent & IEVENT_TXC) { 2475 1.2 matt sc->sc_ev_tx_pause.ev_count++; 2476 1.2 matt } 2477 1.2 matt if (ievent & IEVENT_RXC) { 2478 1.2 matt sc->sc_ev_rx_pause.ev_count++; 2479 1.2 matt } 2480 1.2 matt if (ievent & IEVENT_DPE) { 2481 1.2 matt soft_flags |= SOFT_RESET; 2482 1.2 matt sc->sc_imask &= ~IEVENT_DPE; 2483 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2484 1.2 matt } 2485 1.2 matt } 2486 1.2 matt } 2487 1.2 matt 2488 1.2 matt void 2489 1.2 matt pq3etsec_soft_intr(void *arg) 2490 1.2 matt { 2491 1.2 matt struct pq3etsec_softc * const sc = arg; 2492 1.2 matt struct ifnet * const ifp = &sc->sc_if; 2493 1.23 nonaka uint32_t imask = 0; 2494 1.2 matt 2495 1.2 matt mutex_enter(sc->sc_lock); 2496 1.2 matt 2497 1.2 matt u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2498 1.2 matt 2499 1.2 matt sc->sc_ev_soft_intr.ev_count++; 2500 1.2 matt 2501 1.2 matt if (soft_flags & SOFT_RESET) { 2502 1.2 matt int s = splnet(); 2503 1.2 matt pq3etsec_ifinit(ifp); 2504 1.2 matt splx(s); 2505 1.2 matt soft_flags = 0; 2506 1.2 matt } 2507 1.2 matt 2508 1.2 matt if (soft_flags & SOFT_RXBSY) { 2509 1.2 matt struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2510 1.2 matt size_t threshold = 5 * rxq->rxq_threshold / 4; 2511 1.2 matt if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2512 1.2 matt threshold = rxq->rxq_last - rxq->rxq_first - 1; 2513 1.2 matt } else { 2514 1.23 nonaka imask |= IEVENT_BSY; 2515 1.2 matt } 2516 1.2 matt aprint_normal_dev(sc->sc_dev, 2517 1.2 matt "increasing receive 
buffers from %zu to %zu\n", 2518 1.2 matt rxq->rxq_threshold, threshold); 2519 1.2 matt rxq->rxq_threshold = threshold; 2520 1.2 matt } 2521 1.2 matt 2522 1.2 matt if ((soft_flags & SOFT_TXINTR) 2523 1.2 matt || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2524 1.2 matt /* 2525 1.2 matt * Let's do what we came here for. Consume transmitted 2526 1.41 msaitoh * packets off the transmit ring. 2527 1.2 matt */ 2528 1.2 matt if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2529 1.2 matt || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2530 1.2 matt sc->sc_ev_tx_stall.ev_count++; 2531 1.2 matt } 2532 1.23 nonaka imask |= IEVENT_TXF; 2533 1.2 matt } 2534 1.2 matt 2535 1.44 msaitoh if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) { 2536 1.43 msaitoh /* Let's consume */ 2537 1.2 matt pq3etsec_rxq_consume(sc, &sc->sc_rxq); 2538 1.23 nonaka imask |= IEVENT_RXF; 2539 1.2 matt } 2540 1.2 matt 2541 1.2 matt if (soft_flags & SOFT_TXERROR) { 2542 1.2 matt pq3etsec_tx_error(sc); 2543 1.23 nonaka imask |= IEVENT_TXE; 2544 1.2 matt } 2545 1.2 matt 2546 1.2 matt if (ifp->if_flags & IFF_RUNNING) { 2547 1.2 matt pq3etsec_rxq_produce(sc, &sc->sc_rxq); 2548 1.23 nonaka mutex_spin_enter(sc->sc_hwlock); 2549 1.23 nonaka sc->sc_imask |= imask; 2550 1.2 matt etsec_write(sc, IMASK, sc->sc_imask); 2551 1.23 nonaka mutex_spin_exit(sc->sc_hwlock); 2552 1.2 matt } else { 2553 1.2 matt KASSERT((soft_flags & SOFT_RXBSY) == 0); 2554 1.2 matt } 2555 1.2 matt 2556 1.2 matt mutex_exit(sc->sc_lock); 2557 1.2 matt } 2558 1.2 matt 2559 1.2 matt static void 2560 1.2 matt pq3etsec_mii_tick(void *arg) 2561 1.2 matt { 2562 1.2 matt struct pq3etsec_softc * const sc = arg; 2563 1.2 matt mutex_enter(sc->sc_lock); 2564 1.2 matt callout_ack(&sc->sc_mii_callout); 2565 1.2 matt sc->sc_ev_mii_ticks.ev_count++; 2566 1.2 matt #ifdef DEBUG 2567 1.2 matt uint64_t now = mftb(); 2568 1.2 matt if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2569 1.2 matt aprint_debug_dev(sc->sc_dev, "%s: diff=%"PRIu64"\n", 2570 1.2 matt __func__, now - sc->sc_mii_last_tick); 2571 1.2 matt callout_stop(&sc->sc_mii_callout); 2572 1.2 matt } 2573 1.2 matt #endif 2574 1.2 matt mii_tick(&sc->sc_mii); 2575 1.2 matt int s = splnet(); 2576 1.2 matt if (sc->sc_soft_flags & SOFT_RESET) 2577 1.2 matt softint_schedule(sc->sc_soft_ih); 2578 1.2 matt splx(s); 2579 1.2 matt callout_schedule(&sc->sc_mii_callout, hz); 2580 1.6 matt #ifdef DEBUG 2581 1.2 matt sc->sc_mii_last_tick = now; 2582 1.6 matt #endif 2583 1.2 matt mutex_exit(sc->sc_lock); 2584 1.2 matt } 2585 1.25 nonaka 2586 1.25 nonaka static void 2587 1.25 nonaka pq3etsec_set_ic_rx(struct pq3etsec_softc *sc) 2588 1.25 nonaka { 2589 1.25 nonaka uint32_t reg; 2590 1.25 nonaka 2591 1.25 nonaka if (ETSEC_IC_RX_ENABLED(sc)) { 2592 1.25 nonaka reg = RXIC_ICEN; 2593 1.25 nonaka reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count); 2594 1.25 nonaka reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time); 2595 1.25 nonaka } else { 2596 1.25 nonaka /* Disable RX interrupt coalescing */ 2597 1.25 nonaka reg = 0; 2598 1.25 nonaka } 2599 1.25 nonaka 2600 1.25 nonaka etsec_write(sc, RXIC, reg); 2601 1.25 nonaka } 2602 1.25 nonaka 2603 1.25 nonaka static void 2604 1.25 nonaka pq3etsec_set_ic_tx(struct pq3etsec_softc *sc) 2605 1.25 nonaka { 2606 1.25 nonaka uint32_t reg; 2607 1.25 nonaka 2608 1.25 nonaka if (ETSEC_IC_TX_ENABLED(sc)) { 2609 1.25 nonaka reg = TXIC_ICEN; 2610 1.25 nonaka reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count); 2611 1.25 nonaka reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time); 2612 1.25 nonaka } else { 2613 1.25 nonaka /* Disable TX interrupt coalescing */ 2614 1.25 
nonaka reg = 0; 2615 1.25 nonaka } 2616 1.25 nonaka 2617 1.25 nonaka etsec_write(sc, TXIC, reg); 2618 1.25 nonaka } 2619 1.25 nonaka 2620 1.25 nonaka /* 2621 1.25 nonaka * sysctl 2622 1.25 nonaka */ 2623 1.25 nonaka static int 2624 1.25 nonaka pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep) 2625 1.25 nonaka { 2626 1.25 nonaka struct sysctlnode node = *rnode; 2627 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2628 1.25 nonaka int value = *valuep; 2629 1.25 nonaka int error; 2630 1.25 nonaka 2631 1.25 nonaka node.sysctl_data = &value; 2632 1.25 nonaka error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2633 1.25 nonaka if (error != 0 || newp == NULL) 2634 1.25 nonaka return error; 2635 1.25 nonaka 2636 1.25 nonaka if (value < 0 || value > 65535) 2637 1.25 nonaka return EINVAL; 2638 1.25 nonaka 2639 1.25 nonaka mutex_enter(sc->sc_lock); 2640 1.25 nonaka *valuep = value; 2641 1.25 nonaka if (valuep == &sc->sc_ic_rx_time) 2642 1.25 nonaka pq3etsec_set_ic_rx(sc); 2643 1.25 nonaka else 2644 1.25 nonaka pq3etsec_set_ic_tx(sc); 2645 1.25 nonaka mutex_exit(sc->sc_lock); 2646 1.25 nonaka 2647 1.25 nonaka return 0; 2648 1.25 nonaka } 2649 1.25 nonaka 2650 1.25 nonaka static int 2651 1.25 nonaka pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep) 2652 1.25 nonaka { 2653 1.25 nonaka struct sysctlnode node = *rnode; 2654 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2655 1.25 nonaka int value = *valuep; 2656 1.25 nonaka int error; 2657 1.25 nonaka 2658 1.25 nonaka node.sysctl_data = &value; 2659 1.25 nonaka error = sysctl_lookup(SYSCTLFN_CALL(&node)); 2660 1.25 nonaka if (error != 0 || newp == NULL) 2661 1.25 nonaka return error; 2662 1.25 nonaka 2663 1.25 nonaka if (value < 0 || value > 255) 2664 1.25 nonaka return EINVAL; 2665 1.25 nonaka 2666 1.25 nonaka mutex_enter(sc->sc_lock); 2667 1.25 nonaka *valuep = value; 2668 1.25 nonaka if (valuep == &sc->sc_ic_rx_count) 2669 1.25 nonaka pq3etsec_set_ic_rx(sc); 2670 1.25 nonaka else 2671 1.25 nonaka pq3etsec_set_ic_tx(sc); 2672 1.25 nonaka mutex_exit(sc->sc_lock); 2673 1.25 nonaka 2674 1.25 nonaka return 0; 2675 1.25 nonaka } 2676 1.25 nonaka 2677 1.25 nonaka static int 2678 1.25 nonaka pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS) 2679 1.25 nonaka { 2680 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2681 1.25 nonaka 2682 1.25 nonaka return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2683 1.25 nonaka &sc->sc_ic_rx_time); 2684 1.25 nonaka } 2685 1.25 nonaka 2686 1.25 nonaka static int 2687 1.25 nonaka pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS) 2688 1.25 nonaka { 2689 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2690 1.25 nonaka 2691 1.25 nonaka return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2692 1.25 nonaka &sc->sc_ic_rx_count); 2693 1.25 nonaka } 2694 1.25 nonaka 2695 1.25 nonaka static int 2696 1.25 nonaka pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS) 2697 1.25 nonaka { 2698 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2699 1.25 nonaka 2700 1.25 nonaka return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 2701 1.25 nonaka &sc->sc_ic_tx_time); 2702 1.25 nonaka } 2703 1.25 nonaka 2704 1.25 nonaka static int 2705 1.25 nonaka pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS) 2706 1.25 nonaka { 2707 1.25 nonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 2708 1.25 nonaka 2709 1.25 nonaka return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 2710 1.25 nonaka &sc->sc_ic_tx_count); 2711 1.25 nonaka } 2712 1.25 nonaka 2713 1.25 
static void pq3etsec_sysctl_setup(struct sysctllog **clog,
    struct pq3etsec_softc *sc)
{
	const struct sysctlnode *cnode, *rnode;

	if (sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("TSEC interface"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &rnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "int_coal",
	    SYSCTL_DESCR("Interrupt coalescing"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_time",
	    SYSCTL_DESCR("RX time threshold (0-65535)"),
	    pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rx_count",
	    SYSCTL_DESCR("RX frame count threshold (0-255)"),
	    pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "tx_time",
	    SYSCTL_DESCR("TX time threshold (0-65535)"),
	    pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "tx_count",
	    SYSCTL_DESCR("TX frame count threshold (0-255)"),
	    pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	return;

 bad:
	aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n");
}
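/*
 * Usage sketch (the instance name below is hypothetical, not taken from
 * this file): the nodes above hang off CTL_HW, so for a device that
 * attached as, say, "tsec0", the coalescing knobs would appear as
 *
 *	sysctl -w hw.tsec0.int_coal.rx_time=256
 *	sysctl -w hw.tsec0.int_coal.rx_count=16
 *	sysctl -w hw.tsec0.int_coal.tx_time=256
 *	sysctl -w hw.tsec0.int_coal.tx_count=16
 *
 * The helpers above bound the time thresholds to 0-65535 and the frame
 * count thresholds to 0-255; whether a value of zero disables coalescing
 * for a direction depends on ETSEC_IC_RX_ENABLED()/ETSEC_IC_TX_ENABLED(),
 * defined elsewhere in this file.
 */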