/*	$NetBSD: if_nfe.c,v 1.10 2007/01/01 03:43:04 tsutsui Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.52 2006/03/02 09:04:00 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.10 2007/01/01 03:43:04 tsutsui Exp $");

#include "opt_inet.h"
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(struct device *, struct cfdata *, void *);
void	nfe_attach(struct device *, struct device *, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct device *);
int	nfe_miibus_readreg(struct device *, int, int);
void	nfe_miibus_writereg(struct device *, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, caddr_t);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *);
void	nfe_jfree(struct mbuf *, caddr_t, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
int	nfe_ifmedia_upd(struct ifnet *);
void	nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

CFATTACH_DECL(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach, NULL, NULL);

/*#define NFE_NO_JUMBO*/

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

#ifdef _LP64
#define __LP64__ 1
#endif

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 }
};

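/*
 * Match any PCI device listed in the nfe_devices table above.
 */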
int
nfe_match(struct device *dev, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < sizeof(nfe_devices) / sizeof(nfe_devices[0]); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(struct device *parent, struct device *self, void *aux)
{
	struct nfe_softc *sc = (struct nfe_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;
	char devinfo[256];

	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n",
	    devinfo, PCI_REVISION(pa->pa_class));

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		printf("%s: could not map mem space\n", sc->sc_dev.dv_xname);
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf("%s: could not map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: could not establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	sc->sc_dmat = pa->pa_dmat;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	printf("%s: Ethernet address %s\n",
	    sc->sc_dev.dv_xname, ether_sprintf(sc->sc_enaddr));

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN;
		break;
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		printf("%s: could not allocate Tx ring\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		printf("%s: could not allocate Rx ring\n",
		    sc->sc_dev.dv_xname);
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
		    IFCAP_CSUM_UDPv4;
	}
#endif

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, nfe_ifmedia_upd,
	    nfe_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc->sc_dev.dv_xname);
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	callout_init(&sc->sc_tick_ch);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    nfe_power, sc);
}

void
nfe_power(int why, void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp;

	if (why == PWR_RESUME) {
		ifp = &sc->sc_ethercom.ec_if;
		if (ifp->if_flags & IFF_UP) {
			ifp->if_flags &= ~IFF_RUNNING;
			nfe_init(ifp);
			if (ifp->if_flags & IFF_RUNNING)
				nfe_start(ifp);
		}
	}
}

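/*
 * Called by the MII layer when the negotiated media changes; program the
 * MAC-side speed/duplex and random seed registers to match the PHY state.
 */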
void
nfe_miibus_statchg(struct device *dev)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

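/*
 * Read a PHY register through the MAC's MII interface: clear stale
 * status, issue the read, then busy-wait (up to 1000 x 100us) for the
 * BUSY bit to clear before fetching the data.
 */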
int
nfe_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    sc->sc_dev.dv_xname));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    sc->sc_dev.dv_xname, phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = (struct nfe_softc *)dev;
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

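/*
 * Interrupt handler: acknowledge all pending interrupt sources, then
 * service the Rx and Tx rings.  Returns nonzero if the interrupt was ours.
 */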
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;

	if ((r = NFE_READ(sc, NFE_IRQ_STATUS)) == 0)
		return 0;	/* not for us */
	NFE_WRITE(sc, NFE_IRQ_STATUS, r);

	DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(("%s: link state changed\n", sc->sc_dev.dv_xname));
	}

	if (ifp->if_flags & IFF_RUNNING) {
		/* check Rx ring */
		nfe_rxeof(sc);

		/* check Tx ring */
		nfe_txeof(sc);
	}

	return 1;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc32 - (caddr_t)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)desc64 - (caddr_t)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

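/*
 * Sync the range of Tx descriptors [start, end).  If the range wraps
 * past the end of the ring, it is synced in two pieces: 'start' to the
 * end of the ring, then the start of the ring to 'end'.
 */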
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
		    (caddr_t)&sc->txq.desc32[end] -
		    (caddr_t)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc32[start] - (caddr_t)sc->txq.desc32,
	    (caddr_t)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc32[end] - (caddr_t)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
		    (caddr_t)&sc->txq.desc64[end] -
		    (caddr_t)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (caddr_t)&sc->txq.desc64[start] - (caddr_t)sc->txq.desc64,
	    (caddr_t)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (caddr_t)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (caddr_t)&sc->txq.desc64[end] - (caddr_t)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc32 - (caddr_t)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (caddr_t)desc64 - (caddr_t)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

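/*
 * Walk the Rx ring and hand completed frames to the network stack,
 * replacing each consumed buffer with a fresh mbuf (cluster or jumbo
 * buffer) before returning the descriptor to the hardware.
 */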
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len;

	desc32 = NULL;
	desc64 = NULL;
	for (;;) {
		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if (flags & NFE_RX_READY)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1))
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2))
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}
			MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);

			bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
			    mtod(data->m, caddr_t) - sc->rxq.jpool, NFE_JBYTES,
			    BUS_DMASYNC_POSTREAD);

			physaddr = jbuf->physaddr;
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if (!(mnew->m_flags & M_EXT)) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    sc->sc_dev.dv_xname);
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

#ifdef notyet
		if (sc->sc_flags & NFE_HW_CSUM) {
			if (flags & NFE_RX_IP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (flags & NFE_RX_UDP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
			if (flags & NFE_RX_TCP_CSUMOK)
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
#elif defined(NFE_CSUM)
		if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK))
			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64, BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32, BUS_DMASYNC_PREWRITE);
		}

		sc->rxq.cur = (sc->rxq.cur + 1) % NFE_RX_RING_COUNT;
	}
}

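/*
 * Reclaim transmitted descriptors: free the mbufs and DMA maps of
 * completed frames and reopen the send queue once at least one slot
 * has been freed.
 */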
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;

	while (sc->txq.next != sc->txq.cur) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.next];
			nfe_txdesc64_sync(sc, desc64, BUS_DMASYNC_POSTREAD);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[sc->txq.next];
			nfe_txdesc32_sync(sc, desc32, BUS_DMASYNC_POSTREAD);

			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		data = &sc->txq.data[sc->txq.next];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				printf("%s: tx v1 error 0x%04x\n",
				    sc->sc_dev.dv_xname, flags);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL)
				goto skip;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				printf("%s: tx v2 error 0x%04x\n",
				    sc->sc_dev.dv_xname, flags);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get here */
			printf("%s: last fragment bit w/o associated mbuf!\n",
			    sc->sc_dev.dv_xname);
			goto skip;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;

		ifp->if_timer = 0;

skip:		sc->txq.queued--;
		sc->txq.next = (sc->txq.next + 1) % NFE_TX_RING_COUNT;
	}

	if (data != NULL) {	/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
		nfe_start(ifp);
	}
}

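/*
 * DMA-map an outgoing mbuf chain and fill one Tx descriptor per segment.
 * Checksum and VLAN flags go on the first fragment only; the last
 * fragment is marked with the LASTFRAG bit once the chain is mapped.
 */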
int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags = NFE_TX_VALID;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;
	map = sc->txq.data[sc->txq.cur].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map mbuf (error %d)\n",
		    sc->sc_dev.dv_xname, error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
#ifdef NFE_CSUM
	if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
		flags |= NFE_TX_IP_CSUM;
	if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT))
		flags |= NFE_TX_TCP_CSUM;
#endif

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
#if NVLAN > 0
			desc64->vtag = htole32(vtag);
#endif
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/* csum flags and vtag belong to the first fragment only */
		if (map->dm_nsegs > 1) {
			flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM);
#if NVLAN > 0
			vtag = 0;
#endif
		}

		sc->txq.queued++;
		sc->txq.cur = (sc->txq.cur + 1) % NFE_TX_RING_COUNT;
	}

	/* the whole mbuf chain has been DMA mapped, fix last descriptor */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);
	} else {
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

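/*
 * Drain the interface send queue into the Tx ring, then sync the
 * touched descriptors and kick the transmitter once at the end.
 */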
void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.cur;
	struct mbuf *m0;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}
	if (sc->txq.cur == old)	/* nothing sent */
		return;

	if (sc->sc_flags & NFE_40BIT_ADDR)
		nfe_txdesc64_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);
	else
		nfe_txdesc32_rsync(sc, old, sc->txq.cur, BUS_DMASYNC_PREWRITE);

	/* kick Tx */
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

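/*
 * (Re)initialize the hardware: reset the Rx/Tx engine, program ring
 * addresses and sizes, restore the MAC address and Rx filter, and
 * re-enable the receiver, transmitter and interrupts.
 */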
int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#endif
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	nfe_ifmedia_upd(ifp);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

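/*
 * Allocate the Rx descriptor ring and its receive buffers.  The ring
 * itself lives in one DMA-safe segment; each slot gets either an mbuf
 * cluster with its own DMA map or a buffer from the jumbo pool.
 */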
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			printf("%s: could not allocate jumbo frames\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			printf("%s: could not allocate rx mbuf\n",
			    sc->sc_dev.dv_xname);
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc)) == NULL) {
				printf("%s: could not allocate jumbo buffer\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				printf("%s: could not create DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				printf("%s: could not allocate mbuf cluster\n",
				    sc->sc_dev.dv_xname);
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				printf("%s: could not load rx buf DMA map\n",
				    sc->sc_dev.dv_xname);
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

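/*
 * Take a buffer from the jumbo free list, or return NULL if it is empty.
 */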
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct nfe_jbuf *jbuf;

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf == NULL)
		return NULL;
	SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = (buf - sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		printf("%s: request to free a buffer (%p) not managed by us\n",
		    sc->sc_dev.dv_xname, buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);

	if (m != NULL)
		pool_cache_put(&mbpool_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	caddr_t buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		printf("%s: could not create jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map jumbo DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load jumbo DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		printf("%s: could not create desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not allocate DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (caddr_t *)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not map desc DMA memory\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		printf("%s: could not load desc DMA map\n",
		    sc->sc_dev.dv_xname);
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			printf("%s: could not create DMA map\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *miisc;

	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

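/*
 * Program the hardware Rx filter.  The chip matches a single
 * address/mask pair, so compute the bits common to all multicast
 * addresses (addr) and the bits on which they all agree (mask),
 * falling back to ALLMULTI when a multicast range is configured.
 */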
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

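/*
 * One-second callout: poll the MII for link changes and reschedule.
 */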
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}