/*	$NetBSD: if_nfe.c,v 1.81 2024/07/05 04:31:51 rin Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.81 2024/07/05 04:31:51 rin Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
int	nfe_detach(device_t, int);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct ifnet *);
int	nfe_miibus_readreg(device_t, int, int, uint16_t *);
int	nfe_miibus_writereg(device_t, int, int, uint16_t);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc),
    nfe_match, nfe_attach, nfe_detach, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};
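
/*
 * Match any NVIDIA nForce/MCP ethernet function listed in
 * nfe_devices[] above.
 */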
int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_mii;
	pcireg_t memtype, csr;
	int mii_flags = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	pci_aprint_devinfo(pa, NULL);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &sc->sc_mems) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, nfe_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (pci_dma64_available(pa) && (sc->sc_flags & NFE_40BIT_ADDR) != 0) {
		if (bus_dmatag_subregion(pa->pa_dmat64,
			    0,
			    (bus_addr_t)__MASK(40),
			    &sc->sc_dmat,
			    BUS_DMA_WAITOK) != 0) {
			aprint_error_dev(self,
			    "unable to create 40-bit DMA tag\n");
			sc->sc_dmat = pa->pa_dmat64;
		} else
			sc->sc_dmat_needs_free = true;
	} else
		sc->sc_dmat = pa->pa_dmat;

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN) {
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
	}
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = nfe_miibus_readreg;
	mii->mii_writereg = nfe_miibus_writereg;
	mii->mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}
}
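
/*
 * Undo nfe_attach(): stop the interface, detach MII and ethernet,
 * release the rings, the DMA tag, the interrupt and the register
 * mapping, and restore the MAC address registers for the next user
 * of the hardware.
 */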
int
nfe_detach(device_t self, int flags)
{
	struct nfe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	nfe_stop(ifp, 1);

	pmf_device_deregister(self);
	callout_destroy(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ifmedia_fini(&sc->sc_mii.mii_media);

	nfe_free_rx_ring(sc, &sc->rxq);
	mutex_destroy(&sc->rxq.mtx);
	nfe_free_tx_ring(sc, &sc->txq);

	if (sc->sc_dmat_needs_free)
		bus_dmatag_destroy(sc->sc_dmat);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		nfe_set_macaddr(sc, sc->sc_enaddr);
	} else {
		NFE_WRITE(sc, NFE_MACADDR_LO,
		    sc->sc_enaddr[0] << 8 | sc->sc_enaddr[1]);
		NFE_WRITE(sc, NFE_MACADDR_HI,
		    sc->sc_enaddr[2] << 24 | sc->sc_enaddr[3] << 16 |
		    sc->sc_enaddr[4] << 8 | sc->sc_enaddr[5]);
	}

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	splx(s);

	return 0;
}
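
/*
 * MII status change callback: mirror the speed and duplex negotiated
 * by the PHY into the MAC's PHY-interface, random-seed and link-speed
 * registers.
 */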
void
nfe_miibus_statchg(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_HDX) != 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}
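
/*
 * MII bus accessors: PHY registers are reached indirectly through
 * NFE_PHY_CTL.  Each access busy-waits for the PHY_BUSY bit to clear,
 * up to 1000 tries of 100us each (100ms total).
 */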
int
nfe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t data;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY read (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return ETIMEDOUT;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return -1;
	}

	data = NFE_READ(sc, NFE_PHY_DATA);
	sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x data 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, data));

	*val = data & 0x0000ffff;
	return 0;
}

int
nfe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
#ifdef NFE_DEBUG
		if (nfedebug >= 2)
			printf("timeout waiting for PHY write (%d, %d)\n",
			    phy, reg);
#endif
		return ETIMEDOUT;
	}
	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not write PHY (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return -1;
	}
	return 0;
}
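
/*
 * Interrupt handler: acknowledge and service every pending IRQ status
 * bit (Rx, Tx, link change) in a loop, then schedule a deferred start
 * to drain the send queue.
 */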
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR |NFE_IRQ_RX_NOBUF |NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled)
		if_schedule_deferred_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
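
/*
 * Receive completion: walk the Rx ring from rxq.cur, hand each
 * completed frame to the stack, and recycle every descriptor with a
 * fresh buffer (jumbo buffer or mbuf cluster) and NFE_RX_READY set.
 */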
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					if_statinc(ifp, if_ierrors);
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					if_statinc(ifp, if_ierrors);
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				if_statinc(ifp, if_ierrors);
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				if_statinc(ifp, if_ierrors);
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->physaddr[0] =
			    htole32(((uint64_t)physaddr) >> 32);
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}
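
/*
 * Transmit completion: reclaim descriptors from txq.next onward,
 * unloading the DMA map and freeing the mbuf chain of every packet
 * the chip has finished with (NFE_TX_VALID cleared).
 */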
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				if_statinc(ifp, if_oerrors);
			} else
				if_statinc(ifp, if_opackets);
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				if_statinc(ifp, if_oerrors);
			} else
				if_statinc(ifp, if_opackets);
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}
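
/*
 * Map an mbuf chain onto the Tx ring.  Checksum flags and the VLAN
 * tag belong to the first fragment only, and the NFE_TX_VALID bit of
 * the first descriptor is set last so the chip never sees a partially
 * built chain.
 */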
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (vlan_has_tag(m0))
		vtag = NFE_TX_VTAG | vlan_get_tag(m0);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
			desc64->physaddr[0] =
			    htole32(((uint64_t)map->dm_segs[i].ds_addr) >> 32);
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
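
/*
 * if_start handler: dequeue and encapsulate packets until the ring is
 * full, then sync the dirtied descriptor range and kick the Tx engine.
 */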
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	if_statinc(ifp, if_oerrors);
}
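
/*
 * if_init handler: reset the chip, program the ring addresses and
 * sizes, the MAC address and the Rx filter, renegotiate the media,
 * and enable Rx, Tx and interrupts.
 */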
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, ((uint64_t)sc->rxq.physaddr) >> 32);
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, ((uint64_t)sc->txq.physaddr) >> 32);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}
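
/*
 * if_stop handler: halt the Tx and Rx engines, mask interrupts and
 * reset both descriptor rings.
 */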
void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
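
/*
 * Allocate the Rx descriptor ring in DMA-safe memory and pre-load a
 * receive buffer (jumbo buffer or mbuf cluster) for every slot.
 */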
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			desc64->physaddr[0] =
			    htole32(((uint64_t)physaddr) >> 32);
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}
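
/*
 * Rx ring reset and teardown helpers: mark every descriptor
 * NFE_RX_READY again, or release the descriptor memory, the per-slot
 * DMA maps and mbufs, and the jumbo pool.
 */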
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				error = ENOMEM;
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map\n");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			desc64->physaddr[0] =
			    htole32(((uint64_t)physaddr) >> 32);
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}
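
/*
 * nfe_free_rx_ring: counterpart of nfe_alloc_rx_ring.  Safe to call on
 * a partially constructed ring: every resource is checked before it is
 * released.
 */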
void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		m_freem(data->m);
	}

	nfe_jpool_free(sc);
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}
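
/*
 * nfe_jpool_alloc: allocate one contiguous NFE_JPOOL_SIZE chunk of
 * DMA-safe memory, carve it into NFE_JPOOL_COUNT buffers of NFE_JBYTES
 * each and thread them on rxq.jfreelist.  Buffer i starts at
 * jpool + i * NFE_JBYTES, which is how nfe_jfree() recovers the buffer
 * index from a bare pointer.
 */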
int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
		ring->jmap = NULL;
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
		ring->jpool = NULL;
	}
}
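
/*
 * nfe_alloc_tx_ring: allocate and load the Tx descriptor ring and
 * create one DMA map per ring slot, each sized for a jumbo frame of
 * up to NFE_MAX_SCATTER segments.
 */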
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}
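
/*
 * nfe_free_tx_ring: unload and free the Tx descriptor ring, free any
 * mbufs still held by in-flight transmissions and destroy the
 * per-slot DMA maps.
 */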
void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			ETHER_UNLOCK(ec);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    (uint32_t)mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
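
/*
 * nfe_get_macaddr: read the station address from the MACADDR
 * registers.  Chips without the NFE_CORRECT_MACADDR quirk apparently
 * hold it in byte-reversed order, so for those the bytes are swapped
 * while reading.
 */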
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;
	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, const pmf_qual_t *qual)
{
	nfe_poweron(dv);

	return true;
}