/*	$NetBSD: if_age.c,v 1.3 2009/01/16 23:10:32 cegger Exp $ */
2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */
3
4 /*-
5 * Copyright (c) 2008, Pyun YongHyeon <yongari (at) FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.3 2009/01/16 23:10:32 cegger Exp $");
35
36 #include "bpfilter.h"
37 #include "vlan.h"
38
39 #include <sys/param.h>
40 #include <sys/proc.h>
41 #include <sys/endian.h>
42 #include <sys/systm.h>
43 #include <sys/types.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/queue.h>
47 #include <sys/kernel.h>
48 #include <sys/device.h>
49 #include <sys/callout.h>
50 #include <sys/socket.h>
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_ether.h>
56
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif
63
64 #include <net/if_types.h>
65 #include <net/if_vlanvar.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70
71 #include <sys/rnd.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79
80 #include <dev/pci/if_agereg.h>
81
82 static int age_match(device_t, cfdata_t, void *);
83 static void age_attach(device_t, device_t, void *);
84 static int age_detach(device_t, int);
85
86 static bool age_suspend(device_t PMF_FN_PROTO);
87 static bool age_resume(device_t PMF_FN_PROTO);
88
89 static int age_miibus_readreg(device_t, int, int);
90 static void age_miibus_writereg(device_t, int, int, int);
91 static void age_miibus_statchg(device_t);
92
93 static int age_init(struct ifnet *);
94 static int age_ioctl(struct ifnet *, u_long, void *);
95 static void age_start(struct ifnet *);
96 static void age_watchdog(struct ifnet *);
97 static void age_mediastatus(struct ifnet *, struct ifmediareq *);
98 static int age_mediachange(struct ifnet *);
99
100 static int age_intr(void *);
101 static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t, uint32_t *);
102 static int age_dma_alloc(struct age_softc *);
103 static void age_dma_free(struct age_softc *);
104 static void age_get_macaddr(struct age_softc *, uint8_t[]);
105 static void age_phy_reset(struct age_softc *);
106
107 static int age_encap(struct age_softc *, struct mbuf **);
108 static void age_init_tx_ring(struct age_softc *);
109 static int age_init_rx_ring(struct age_softc *);
110 static void age_init_rr_ring(struct age_softc *);
111 static void age_init_cmb_block(struct age_softc *);
112 static void age_init_smb_block(struct age_softc *);
113 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int);
114 static void age_mac_config(struct age_softc *);
115 static void age_txintr(struct age_softc *, int);
116 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
117 static void age_rxintr(struct age_softc *, int);
118 static void age_tick(void *);
119 static void age_reset(struct age_softc *);
120 static void age_stop(struct age_softc *);
121 static void age_stats_update(struct age_softc *);
122 static void age_stop_txmac(struct age_softc *);
123 static void age_stop_rxmac(struct age_softc *);
124 static void age_rxvlan(struct age_softc *sc);
125 static void age_rxfilter(struct age_softc *);
126
127 CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
128 age_match, age_attach, age_detach, NULL);
129
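/* Set agedebug to a non-zero value (e.g. from ddb) to enable DPRINTF output. */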
130 int agedebug = 0;
131 #define DPRINTF(x) do { if (agedebug) printf x; } while (0)
132
133 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)
134
135 static int
136 age_match(device_t dev, cfdata_t match, void *aux)
137 {
138 struct pci_attach_args *pa = aux;
139
140 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
141 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
142 }
143
144 static void
145 age_attach(device_t parent, device_t self, void *aux)
146 {
147 struct age_softc *sc = device_private(self);
148 struct pci_attach_args *pa = aux;
149 pci_intr_handle_t ih;
150 const char *intrstr;
151 struct ifnet *ifp = &sc->sc_ec.ec_if;
152 pcireg_t memtype;
153 int error = 0;
154
155 aprint_naive("\n");
156 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");
157
158 sc->sc_dev = self;
159 sc->sc_dmat = pa->pa_dmat;
160 sc->sc_pct = pa->pa_pc;
161 sc->sc_pcitag = pa->pa_tag;
162
163 /*
164 * Allocate IO memory
165 */
166 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
167 switch (memtype) {
168 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
169 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
170 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
171 break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		return;
175 }
176
177 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
178 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
179 aprint_error_dev(self, "could not map mem space\n");
180 return;
181 }
182
183 if (pci_intr_map(pa, &ih) != 0) {
184 aprint_error_dev(self, "could not map interrupt\n");
185 return;
186 }
187
188 /*
189 * Allocate IRQ
190 */
191 intrstr = pci_intr_string(sc->sc_pct, ih);
192 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
193 age_intr, sc);
194 if (sc->sc_irq_handle == NULL) {
195 aprint_error_dev(self, "could not establish interrupt");
196 if (intrstr != NULL)
197 aprint_error(" at %s", intrstr);
198 aprint_error("\n");
199 return;
200 }
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
202
203 /* Set PHY address. */
204 sc->age_phyaddr = AGE_PHY_ADDR;
205
206 /* Reset PHY. */
207 age_phy_reset(sc);
208
209 /* Reset the ethernet controller. */
210 age_reset(sc);
211
212 /* Get PCI and chip id/revision. */
213 sc->age_rev = PCI_REVISION(pa->pa_class);
214 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
215 MASTER_CHIP_REV_SHIFT;
216
217 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
218 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);
219
220 if (agedebug) {
221 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
222 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
223 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
224 }
225
226 /* Set max allowable DMA size. */
227 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
228 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
229
230 /* Allocate DMA stuffs */
231 error = age_dma_alloc(sc);
232 if (error)
233 goto fail;
234
235 callout_init(&sc->sc_tick_ch, 0);
236 callout_setfunc(&sc->sc_tick_ch, age_tick, sc);
237
238 /* Load station address. */
239 age_get_macaddr(sc, sc->sc_enaddr);
240
241 aprint_normal_dev(self, "Ethernet address %s\n",
242 ether_sprintf(sc->sc_enaddr));
243
244 ifp->if_softc = sc;
245 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
246 ifp->if_init = age_init;
247 ifp->if_ioctl = age_ioctl;
248 ifp->if_start = age_start;
249 ifp->if_watchdog = age_watchdog;
250 ifp->if_baudrate = IF_Gbps(1);
251 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
252 IFQ_SET_READY(&ifp->if_snd);
253 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
254
255 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
256
257 #ifdef AGE_CHECKSUM
258 ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
259 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
261 #endif
262
263 #if NVLAN > 0
264 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
265 #endif
266
267 /* Set up MII bus. */
268 sc->sc_miibus.mii_ifp = ifp;
269 sc->sc_miibus.mii_readreg = age_miibus_readreg;
270 sc->sc_miibus.mii_writereg = age_miibus_writereg;
271 sc->sc_miibus.mii_statchg = age_miibus_statchg;
272
273 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
274 age_mediastatus);
275 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
276 MII_OFFSET_ANY, 0);
277
278 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
279 aprint_error_dev(self, "no PHY found!\n");
280 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
281 0, NULL);
282 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
283 } else
284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
285
286 if_attach(ifp);
287 ether_ifattach(ifp, sc->sc_enaddr);
288
289 if (!pmf_device_register(self, age_suspend, age_resume))
290 aprint_error_dev(self, "couldn't establish power handler\n");
291 else
292 pmf_class_network_register(self, ifp);
293
294 return;
295 fail:
296 age_detach(sc->sc_dev, 0);
297 }
298
299 static int
300 age_detach(device_t self, int flags)
301 {
302 struct age_softc *sc = device_private(self);
303 struct ifnet *ifp = &sc->sc_ec.ec_if;
304 int s;
305
306 s = splnet();
307 age_stop(sc);
308 splx(s);
309
310 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
311
312 /* Delete all remaining media. */
313 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
314
315 ether_ifdetach(ifp);
316 if_detach(ifp);
317 age_dma_free(sc);
318
319 if (sc->sc_irq_handle != NULL) {
320 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
321 sc->sc_irq_handle = NULL;
322 }
323
324 return (0);
325 }
326
327 /*
328 * Read a PHY register on the MII of the L1.
329 */
330 static int
331 age_miibus_readreg(struct device *dev, int phy, int reg)
332 {
333 struct age_softc *sc = device_private(dev);
334 uint32_t v;
335 int i;
336
337 if (phy != sc->age_phyaddr)
338 return (0);
339
340 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
341 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
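	/* Wait for the MDIO read transaction to complete. */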
342 for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
343 DELAY(1);
344 v = CSR_READ_4(sc, AGE_MDIO);
345 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
346 break;
347 }
348
349 if (i == 0) {
350 printf("%s: phy read timeout: phy %d, reg %d\n",
351 device_xname(sc->sc_dev), phy, reg);
352 return (0);
353 }
354
355 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
356 }
357
358 /*
359 * Write a PHY register on the MII of the L1.
360 */
361 static void
362 age_miibus_writereg(struct device *dev, int phy, int reg, int val)
363 {
364 struct age_softc *sc = device_private(dev);
365 uint32_t v;
366 int i;
367
368 if (phy != sc->age_phyaddr)
369 return;
370
371 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
372 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
373 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
374
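	/* Wait for the MDIO write transaction to complete. */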
375 for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
376 DELAY(1);
377 v = CSR_READ_4(sc, AGE_MDIO);
378 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
379 break;
380 }
381
382 if (i == 0) {
383 printf("%s: phy write timeout: phy %d, reg %d\n",
384 device_xname(sc->sc_dev), phy, reg);
385 }
386 }
387
388 /*
389 * Callback from MII layer when media changes.
390 */
391 static void
392 age_miibus_statchg(device_t dev)
393 {
394 struct age_softc *sc = device_private(dev);
395 struct ifnet *ifp = &sc->sc_ec.ec_if;
396 struct mii_data *mii;
397
398 if ((ifp->if_flags & IFF_RUNNING) == 0)
399 return;
400
401 mii = &sc->sc_miibus;
402
403 sc->age_flags &= ~AGE_FLAG_LINK;
404 if ((mii->mii_media_status & IFM_AVALID) != 0) {
405 switch (IFM_SUBTYPE(mii->mii_media_active)) {
406 case IFM_10_T:
407 case IFM_100_TX:
408 case IFM_1000_T:
409 sc->age_flags |= AGE_FLAG_LINK;
410 break;
411 default:
412 break;
413 }
414 }
415
416 /* Stop Rx/Tx MACs. */
417 age_stop_rxmac(sc);
418 age_stop_txmac(sc);
419
420 /* Program MACs with resolved speed/duplex/flow-control. */
421 if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
422 uint32_t reg;
423
424 age_mac_config(sc);
425 reg = CSR_READ_4(sc, AGE_MAC_CFG);
426 /* Restart DMA engine and Tx/Rx MAC. */
427 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
428 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
429 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
430 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
431 }
432 }
433
434 /*
435 * Get the current interface media status.
436 */
437 static void
438 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
439 {
440 struct age_softc *sc = ifp->if_softc;
441 struct mii_data *mii = &sc->sc_miibus;
442
443 mii_pollstat(mii);
444 ifmr->ifm_status = mii->mii_media_status;
445 ifmr->ifm_active = mii->mii_media_active;
446 }
447
448 /*
449 * Set hardware to newly-selected media.
450 */
451 static int
452 age_mediachange(struct ifnet *ifp)
453 {
454 struct age_softc *sc = ifp->if_softc;
455 struct mii_data *mii = &sc->sc_miibus;
456 int error;
457
458 if (mii->mii_instance != 0) {
459 struct mii_softc *miisc;
460
461 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
462 mii_phy_reset(miisc);
463 }
464 error = mii_mediachg(mii);
465
466 return (error);
467 }
468
469 static int
470 age_intr(void *arg)
471 {
472 struct age_softc *sc = arg;
473 struct ifnet *ifp = &sc->sc_ec.ec_if;
474 struct cmb *cmb;
475 uint32_t status;
476
477 status = CSR_READ_4(sc, AGE_INTR_STATUS);
478 if (status == 0 || (status & AGE_INTRS) == 0)
479 return (0);
480
481 /* Disable interrupts. */
482 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
483
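	/*
	 * The CMB block is updated by the chip via DMA; it mirrors the
	 * interrupt status together with the current Tx consumer and
	 * Rx return producer indices.
	 */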
484 cmb = sc->age_rdata.age_cmb_block;
485
486 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
487 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
488 status = le32toh(cmb->intr_status);
489 if ((status & AGE_INTRS) == 0)
490 goto back;
491
492 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
493 TPD_CONS_SHIFT;
494 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
495 RRD_PROD_SHIFT;
496
497 /* Let hardware know CMB was served. */
498 cmb->intr_status = 0;
499 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
500 sc->age_cdata.age_cmb_block_map->dm_mapsize,
501 BUS_DMASYNC_PREWRITE);
502
503 if (ifp->if_flags & IFF_RUNNING) {
504 if (status & INTR_CMB_RX)
505 age_rxintr(sc, sc->age_rr_prod);
506
507 if (status & INTR_CMB_TX)
508 age_txintr(sc, sc->age_tpd_cons);
509
510 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
511 if (status & INTR_DMA_RD_TO_RST)
512 printf("%s: DMA read error! -- resetting\n",
513 device_xname(sc->sc_dev));
514 if (status & INTR_DMA_WR_TO_RST)
515 printf("%s: DMA write error! -- resetting\n",
516 device_xname(sc->sc_dev));
517 age_init(ifp);
518 }
519
520 if (!IFQ_IS_EMPTY(&ifp->if_snd))
521 age_start(ifp);
522
523 if (status & INTR_SMB)
524 age_stats_update(sc);
525 }
526
527 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
528 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
529 sc->age_cdata.age_cmb_block_map->dm_mapsize,
530 BUS_DMASYNC_POSTREAD);
531
532 back:
533 /* Re-enable interrupts. */
534 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
535
536 return (1);
537 }
538
539 static int
540 age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
541 uint32_t *word)
542 {
543 int i;
544 pcireg_t rv;
545
546 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_VPD_ADDRESS(vpdc),
547 offset << PCI_VPD_ADDRESS_SHIFT);
548 for (i = AGE_TIMEOUT; i > 0; i--) {
549 DELAY(10);
550 rv = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
551 PCI_VPD_ADDRESS(vpdc));
552 if ((rv & PCI_VPD_OPFLAG) == PCI_VPD_OPFLAG)
553 break;
554 }
555 if (i == 0) {
556 printf("%s: VPD read timeout!\n", device_xname(sc->sc_dev));
557 *word = 0;
558 return ETIMEDOUT;
559 }
560
561 *word = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_VPD_DATAREG(vpdc));
562 return 0;
563 }
564
565 static void
566 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
567 {
568 uint32_t ea[2], off, reg, word;
569 int vpd_error, match, vpdc;
570
571 reg = CSR_READ_4(sc, AGE_SPI_CTRL);
572 if ((reg & SPI_VPD_ENB) != 0) {
573 /* Get VPD stored in TWSI EEPROM. */
574 reg &= ~SPI_VPD_ENB;
575 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
576 }
577
578 vpd_error = 0;
579 ea[0] = ea[1] = 0;
580 if ((vpd_error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
581 PCI_CAP_VPD, &vpdc, NULL))) {
		/*
		 * A PCI VPD capability exists, but the data does not
		 * follow the standard form described in the PCI VPD
		 * specification, so the driver cannot use
		 * pci_get_vpd_readonly(9) with the 'NA' keyword.
		 * Instead, search the VPD data starting at address
		 * 0x0100.  The data is used as initializers for the
		 * AGE_PAR0 and AGE_PAR1 registers, as well as other
		 * PCI configuration registers.
		 */
592 word = 0;
593 match = 0;
594 reg = 0;
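		/*
		 * Each valid entry appears to be a pair of 32-bit words:
		 * a signature word whose low byte is AGE_VPD_REG_CONF_SIG
		 * and whose upper 16 bits name the target register,
		 * followed by the value to program into that register.
		 */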
595 for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
596 off += sizeof(uint32_t)) {
597 vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
598 if (vpd_error != 0)
599 break;
600 if (match != 0) {
601 switch (reg) {
602 case AGE_PAR0:
603 ea[0] = word;
604 break;
605 case AGE_PAR1:
606 ea[1] = word;
607 break;
608 default:
609 break;
610 }
611 match = 0;
612 } else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
613 match = 1;
614 reg = word >> 16;
615 } else
616 break;
617 }
618 if (off >= AGE_VPD_REG_CONF_END)
619 vpd_error = ENOENT;
620 if (vpd_error == 0) {
			/*
			 * Don't blindly trust the ethernet address
			 * obtained from VPD.  Check that it is a valid
			 * address; otherwise fall back to reading the
			 * PAR registers.
			 */
627 ea[1] &= 0xFFFF;
628 if ((ea[0] == 0 && ea[1] == 0) ||
629 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
630 if (agedebug)
631 printf("%s: invalid ethernet address "
632 "returned from VPD.\n",
633 device_xname(sc->sc_dev));
634 vpd_error = EINVAL;
635 }
636 }
637 if (vpd_error != 0 && (agedebug))
638 printf("%s: VPD access failure!\n",
639 device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
		/* No VPD capability: force the PAR register fallback below. */
		vpd_error = ENOENT;
	}
645
	/*
	 * The L1 also seems to provide a way to extract the ethernet
	 * address via the SPI flash interface.  Because SPI flash
	 * devices from different vendors vary in their read-ID
	 * instruction codes, it is very hard to issue that command
	 * without detailed information about the flash part used on
	 * the controller.  To keep the code simple, just read the
	 * AGE_PAR0/AGE_PAR1 registers, which are supposed to be set by
	 * the hardware during power-on reset.
	 */
657 if (vpd_error != 0) {
658 /*
659 * VPD is mapped to SPI flash memory or BIOS set it.
660 */
661 ea[0] = CSR_READ_4(sc, AGE_PAR0);
662 ea[1] = CSR_READ_4(sc, AGE_PAR1);
663 }
664
665 ea[1] &= 0xFFFF;
666 eaddr[0] = (ea[1] >> 8) & 0xFF;
667 eaddr[1] = (ea[1] >> 0) & 0xFF;
668 eaddr[2] = (ea[0] >> 24) & 0xFF;
669 eaddr[3] = (ea[0] >> 16) & 0xFF;
670 eaddr[4] = (ea[0] >> 8) & 0xFF;
671 eaddr[5] = (ea[0] >> 0) & 0xFF;
672 }
673
674 static void
675 age_phy_reset(struct age_softc *sc)
676 {
677 /* Reset PHY. */
678 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
679 DELAY(1000);
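	/* Release the reset and give the PHY time to settle. */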
680 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
681 DELAY(1000);
682 }
683
684 static int
685 age_dma_alloc(struct age_softc *sc)
686 {
687 struct age_txdesc *txd;
688 struct age_rxdesc *rxd;
689 int nsegs, error, i;
690
691 /*
692 * Create DMA stuffs for TX ring
693 */
694 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
695 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
696 if (error)
697 return (ENOBUFS);
698
699 /* Allocate DMA'able memory for TX ring */
700 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
701 PAGE_SIZE, 0, &sc->age_rdata.age_tx_ring_seg, 1,
702 &nsegs, BUS_DMA_WAITOK);
703 if (error) {
704 printf("%s: could not allocate DMA'able memory for Tx ring.\n",
705 device_xname(sc->sc_dev));
706 return error;
707 }
708
709 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
710 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
711 BUS_DMA_NOWAIT);
712 if (error)
713 return (ENOBUFS);
714
715 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);
716
717 /* Load the DMA map for Tx ring. */
718 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
719 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
720 if (error) {
721 printf("%s: could not load DMA'able memory for Tx ring.\n",
722 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
725 return error;
726 }
727
728 sc->age_rdata.age_tx_ring_paddr =
729 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;
730
731 /*
732 * Create DMA stuffs for RX ring
733 */
734 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
735 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
736 if (error)
737 return (ENOBUFS);
738
739 /* Allocate DMA'able memory for RX ring */
740 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
741 PAGE_SIZE, 0, &sc->age_rdata.age_rx_ring_seg, 1,
742 &nsegs, BUS_DMA_WAITOK);
743 if (error) {
744 printf("%s: could not allocate DMA'able memory for Rx ring.\n",
745 device_xname(sc->sc_dev));
746 return error;
747 }
748
749 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
750 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
751 BUS_DMA_NOWAIT);
752 if (error)
753 return (ENOBUFS);
754
755 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);
756
757 /* Load the DMA map for Rx ring. */
758 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
759 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
760 if (error) {
761 printf("%s: could not load DMA'able memory for Rx ring.\n",
762 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
765 return error;
766 }
767
768 sc->age_rdata.age_rx_ring_paddr =
769 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;
770
771 /*
772 * Create DMA stuffs for RX return ring
773 */
774 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
775 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
776 if (error)
777 return (ENOBUFS);
778
779 /* Allocate DMA'able memory for RX return ring */
780 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
781 PAGE_SIZE, 0, &sc->age_rdata.age_rr_ring_seg, 1,
782 &nsegs, BUS_DMA_WAITOK);
783 if (error) {
784 printf("%s: could not allocate DMA'able memory for Rx "
785 "return ring.\n", device_xname(sc->sc_dev));
786 return error;
787 }
788
789 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
790 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
791 BUS_DMA_NOWAIT);
792 if (error)
793 return (ENOBUFS);
794
795 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);
796
797 /* Load the DMA map for Rx return ring. */
798 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
799 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
800 if (error) {
801 printf("%s: could not load DMA'able memory for Rx return ring."
802 "\n", device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
805 return error;
806 }
807
808 sc->age_rdata.age_rr_ring_paddr =
809 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;
810
811 /*
812 * Create DMA stuffs for CMB block
813 */
814 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
815 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
816 &sc->age_cdata.age_cmb_block_map);
817 if (error)
818 return (ENOBUFS);
819
820 /* Allocate DMA'able memory for CMB block */
821 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
822 PAGE_SIZE, 0, &sc->age_rdata.age_cmb_block_seg, 1,
823 &nsegs, BUS_DMA_WAITOK);
824 if (error) {
825 printf("%s: could not allocate DMA'able memory for "
826 "CMB block\n", device_xname(sc->sc_dev));
827 return error;
828 }
829
830 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
831 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
832 BUS_DMA_NOWAIT);
833 if (error)
834 return (ENOBUFS);
835
836 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
837
838 /* Load the DMA map for CMB block. */
839 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
840 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
841 BUS_DMA_WAITOK);
842 if (error) {
843 printf("%s: could not load DMA'able memory for CMB block\n",
844 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
847 return error;
848 }
849
850 sc->age_rdata.age_cmb_block_paddr =
851 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;
852
853 /*
854 * Create DMA stuffs for SMB block
855 */
856 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
857 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
858 &sc->age_cdata.age_smb_block_map);
859 if (error)
860 return (ENOBUFS);
861
862 /* Allocate DMA'able memory for SMB block */
863 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
864 PAGE_SIZE, 0, &sc->age_rdata.age_smb_block_seg, 1,
865 &nsegs, BUS_DMA_WAITOK);
866 if (error) {
867 printf("%s: could not allocate DMA'able memory for "
868 "SMB block\n", device_xname(sc->sc_dev));
869 return error;
870 }
871
872 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
873 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
874 BUS_DMA_NOWAIT);
875 if (error)
876 return (ENOBUFS);
877
878 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);
879
880 /* Load the DMA map for SMB block */
881 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
882 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
883 BUS_DMA_WAITOK);
884 if (error) {
885 printf("%s: could not load DMA'able memory for SMB block\n",
886 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
889 return error;
890 }
891
892 sc->age_rdata.age_smb_block_paddr =
893 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;
894
895 /* Create DMA maps for Tx buffers. */
896 for (i = 0; i < AGE_TX_RING_CNT; i++) {
897 txd = &sc->age_cdata.age_txdesc[i];
898 txd->tx_m = NULL;
899 txd->tx_dmamap = NULL;
900 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
901 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
902 &txd->tx_dmamap);
903 if (error) {
904 printf("%s: could not create Tx dmamap.\n",
905 device_xname(sc->sc_dev));
906 return error;
907 }
908 }
909
910 /* Create DMA maps for Rx buffers. */
911 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
912 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
913 if (error) {
914 printf("%s: could not create spare Rx dmamap.\n",
915 device_xname(sc->sc_dev));
916 return error;
917 }
918 for (i = 0; i < AGE_RX_RING_CNT; i++) {
919 rxd = &sc->age_cdata.age_rxdesc[i];
920 rxd->rx_m = NULL;
921 rxd->rx_dmamap = NULL;
922 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
923 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
924 if (error) {
925 printf("%s: could not create Rx dmamap.\n",
926 device_xname(sc->sc_dev));
927 return error;
928 }
929 }
930
931 return (0);
932 }
933
934 static void
935 age_dma_free(struct age_softc *sc)
936 {
937 struct age_txdesc *txd;
938 struct age_rxdesc *rxd;
939 int i;
940
941 /* Tx buffers */
942 for (i = 0; i < AGE_TX_RING_CNT; i++) {
943 txd = &sc->age_cdata.age_txdesc[i];
944 if (txd->tx_dmamap != NULL) {
945 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
946 txd->tx_dmamap = NULL;
947 }
948 }
949 /* Rx buffers */
950 for (i = 0; i < AGE_RX_RING_CNT; i++) {
951 rxd = &sc->age_cdata.age_rxdesc[i];
952 if (rxd->rx_dmamap != NULL) {
953 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
954 rxd->rx_dmamap = NULL;
955 }
956 }
957 if (sc->age_cdata.age_rx_sparemap != NULL) {
958 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
959 sc->age_cdata.age_rx_sparemap = NULL;
960 }
961
962 /* Tx ring. */
963 if (sc->age_cdata.age_tx_ring_map != NULL)
964 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
965 if (sc->age_cdata.age_tx_ring_map != NULL &&
966 sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
969 sc->age_rdata.age_tx_ring = NULL;
970 sc->age_cdata.age_tx_ring_map = NULL;
971
972 /* Rx ring. */
973 if (sc->age_cdata.age_rx_ring_map != NULL)
974 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
975 if (sc->age_cdata.age_rx_ring_map != NULL &&
976 sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
979 sc->age_rdata.age_rx_ring = NULL;
980 sc->age_cdata.age_rx_ring_map = NULL;
981
982 /* Rx return ring. */
983 if (sc->age_cdata.age_rr_ring_map != NULL)
984 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
985 if (sc->age_cdata.age_rr_ring_map != NULL &&
986 sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
989 sc->age_rdata.age_rr_ring = NULL;
990 sc->age_cdata.age_rr_ring_map = NULL;
991
992 /* CMB block */
993 if (sc->age_cdata.age_cmb_block_map != NULL)
994 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
995 if (sc->age_cdata.age_cmb_block_map != NULL &&
996 sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
999 sc->age_rdata.age_cmb_block = NULL;
1000 sc->age_cdata.age_cmb_block_map = NULL;
1001
1002 /* SMB block */
1003 if (sc->age_cdata.age_smb_block_map != NULL)
1004 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
1005 if (sc->age_cdata.age_smb_block_map != NULL &&
1006 sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
1009 }
1010
1011 static void
1012 age_start(struct ifnet *ifp)
1013 {
1014 struct age_softc *sc = ifp->if_softc;
1015 struct mbuf *m_head;
1016 int enq;
1017
1018 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1019 return;
1020
1021 enq = 0;
1022 for (;;) {
1023 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1024 if (m_head == NULL)
1025 break;
1026
1027 /*
1028 * Pack the data into the transmit ring. If we
1029 * don't have room, set the OACTIVE flag and wait
1030 * for the NIC to drain the ring.
1031 */
1032 if (age_encap(sc, &m_head)) {
1033 if (m_head == NULL)
1034 break;
1035 ifp->if_flags |= IFF_OACTIVE;
1036 break;
1037 }
1038 enq = 1;
1039
1040 #if NBPFILTER > 0
1041 /*
1042 * If there's a BPF listener, bounce a copy of this frame
1043 * to him.
1044 */
1045 if (ifp->if_bpf != NULL)
1046 bpf_mtap(ifp->if_bpf, m_head);
1047 #endif
1048 }
1049
1050 if (enq) {
1051 /* Update mbox. */
1052 AGE_COMMIT_MBOX(sc);
1053 /* Set a timeout in case the chip goes out to lunch. */
1054 ifp->if_timer = AGE_TX_TIMEOUT;
1055 }
1056 }
1057
1058 static void
1059 age_watchdog(struct ifnet *ifp)
1060 {
1061 struct age_softc *sc = ifp->if_softc;
1062
1063 if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
1064 printf("%s: watchdog timeout (missed link)\n",
1065 device_xname(sc->sc_dev));
1066 ifp->if_oerrors++;
1067 age_init(ifp);
1068 return;
1069 }
1070
1071 if (sc->age_cdata.age_tx_cnt == 0) {
1072 printf("%s: watchdog timeout (missed Tx interrupts) "
1073 "-- recovering\n", device_xname(sc->sc_dev));
1074 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1075 age_start(ifp);
1076 return;
1077 }
1078
1079 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1080 ifp->if_oerrors++;
1081 age_init(ifp);
1082
1083 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1084 age_start(ifp);
1085 }
1086
1087 static int
1088 age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1089 {
1090 struct age_softc *sc = ifp->if_softc;
1091 struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
1094
1095 s = splnet();
1096
1097 switch (cmd) {
1098 case SIOCSIFADDR:
1099 ifp->if_flags |= IFF_UP;
1100 if (!(ifp->if_flags & IFF_RUNNING))
1101 age_init(ifp);
1102 #ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
1105 #endif
1106 break;
1107
1108 case SIOCSIFFLAGS:
1109 error = ifioctl_common(ifp, cmd, data);
1110 if (error)
1111 break;
1112 if (ifp->if_flags & IFF_UP) {
1113 if (ifp->if_flags & IFF_RUNNING)
1114 age_rxfilter(sc);
1115 else
1116 age_init(ifp);
1117 } else {
1118 if (ifp->if_flags & IFF_RUNNING)
1119 age_stop(sc);
1120 }
1121 sc->age_if_flags = ifp->if_flags;
1122 break;
1123
1124 case SIOCADDMULTI:
1125 case SIOCDELMULTI:
1126 error = ether_ioctl(ifp, cmd, data);
1127 break;
1128
1129 case SIOCSIFMEDIA:
1130 case SIOCGIFMEDIA:
1131 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1132 break;
1133 default:
1134 error = ether_ioctl(ifp, cmd, data);
1135 break;
1136 }
1137
1138 if (error == ENETRESET) {
1139 if (ifp->if_flags & IFF_RUNNING)
1140 age_rxfilter(sc);
1141 error = 0;
1142 }
1143
1144 splx(s);
1145 return (error);
1146 }
1147
1148 static void
1149 age_mac_config(struct age_softc *sc)
1150 {
1151 struct mii_data *mii;
1152 uint32_t reg;
1153
1154 mii = &sc->sc_miibus;
1155
1156 reg = CSR_READ_4(sc, AGE_MAC_CFG);
1157 reg &= ~MAC_CFG_FULL_DUPLEX;
1158 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
1159 reg &= ~MAC_CFG_SPEED_MASK;
1160
1161 /* Reprogram MAC with resolved speed/duplex. */
1162 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1163 case IFM_10_T:
1164 case IFM_100_TX:
1165 reg |= MAC_CFG_SPEED_10_100;
1166 break;
1167 case IFM_1000_T:
1168 reg |= MAC_CFG_SPEED_1000;
1169 break;
1170 }
1171 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1172 reg |= MAC_CFG_FULL_DUPLEX;
1173 #ifdef notyet
1174 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1175 reg |= MAC_CFG_TX_FC;
1176 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1177 reg |= MAC_CFG_RX_FC;
1178 #endif
1179 }
1180
1181 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1182 }
1183
1184 static bool
1185 age_suspend(device_t dv PMF_FN_ARGS)
1186 {
1187 struct age_softc *sc = device_private(dv);
1188 uint16_t pmstat;
1189
1190 age_stop(sc);
1191
1192 /* XXXcegger Do we have Wake-On-LAN ? */
1193
	/* Clear PME status and disable PME# assertion. */
1195 pmstat = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1196 PCI_PMCSR);
1197 pmstat &= ~(PCI_PMCSR_PME | PCI_PMCSR_PME_EN);
1198 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
1199 PCI_PMCSR, pmstat);
1200
1201 return true;
1202 }
1203
1204 static bool
1205 age_resume(device_t dv PMF_FN_ARGS)
1206 {
1207 struct age_softc *sc = device_private(dv);
1208 struct ifnet *ifp = &sc->sc_ec.ec_if;
1209 uint16_t cmd;
1210
	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets across a resume event.  From Linux.
	 */
1215 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1216 if ((cmd & 0x0400) != 0) {
1217 cmd &= ~0x0400;
1218 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
1219 PCI_COMMAND_STATUS_REG, cmd);
1220 }
1221 if ((ifp->if_flags & IFF_UP) != 0)
1222 age_init(ifp);
1223
1224 return true;
1225 }
1226
1227 static int
1228 age_encap(struct age_softc *sc, struct mbuf **m_head)
1229 {
1230 struct age_txdesc *txd, *txd_last;
1231 struct tx_desc *desc;
1232 struct mbuf *m;
1233 bus_dmamap_t map;
1234 uint32_t cflags, poff, vtag;
1235 int error, i, nsegs, prod;
1236 struct m_tag *mtag;
1237
1238 m = *m_head;
1239 cflags = vtag = 0;
1240 poff = 0;
1241
1242 prod = sc->age_cdata.age_tx_prod;
1243 txd = &sc->age_cdata.age_txdesc[prod];
1244 txd_last = txd;
1245 map = txd->tx_dmamap;
1246
1247 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1248
	if (error == EFBIG) {
1254 error = 0;
1255
		/* Too many segments; copy the chain into one cluster and retry. */
		if ((*m_head)->m_pkthdr.len > MCLBYTES) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len,
		    mtod(m, void *));
		m->m_pkthdr.csum_flags = (*m_head)->m_pkthdr.csum_flags;
		m->m_pkthdr.len = m->m_len = (*m_head)->m_pkthdr.len;
		m_freem(*m_head);
		*m_head = m;
1273
1274 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
1275 BUS_DMA_NOWAIT);
1276
		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
1288 } else if (error) {
1289 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1290 return (error);
1291 }
1292
1293 nsegs = map->dm_nsegs;
1294
1295 if (nsegs == 0) {
1296 m_freem(*m_head);
1297 *m_head = NULL;
1298 return (EIO);
1299 }
1300
1301 /* Check descriptor overrun. */
1302 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
1303 bus_dmamap_unload(sc->sc_dmat, map);
1304 return (ENOBUFS);
1305 }
1306
1307 m = *m_head;
1308 /* Configure Tx IP/TCP/UDP checksum offload. */
1309 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
1310 cflags |= AGE_TD_CSUM;
1311 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
1312 cflags |= AGE_TD_TCPCSUM;
1313 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
1314 cflags |= AGE_TD_UDPCSUM;
1315 /* Set checksum start offset. */
1316 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
1317 }
1318
1319 #if NVLAN > 0
1320 /* Configure VLAN hardware tag insertion. */
1321 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
1322 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
1323 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
1324 cflags |= AGE_TD_INSERT_VLAN_TAG;
1325 }
1326 #endif
1327
1328 desc = NULL;
1329 for (i = 0; i < nsegs; i++) {
1330 desc = &sc->age_rdata.age_tx_ring[prod];
1331 desc->addr = htole64(map->dm_segs[i].ds_addr);
1332 desc->len =
1333 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
1334 desc->flags = htole32(cflags);
1335 sc->age_cdata.age_tx_cnt++;
1336 AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1337 }
1338
1339 /* Update producer index. */
1340 sc->age_cdata.age_tx_prod = prod;
1341
1342 /* Set EOP on the last descriptor. */
1343 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
1344 desc = &sc->age_rdata.age_tx_ring[prod];
1345 desc->flags |= htole32(AGE_TD_EOP);
1346
1347 /* Swap dmamap of the first and the last. */
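	/*
	 * The chain's mbuf pointer is stored on the last descriptor, so
	 * move the dmamap that actually holds the mapping there as well;
	 * age_txintr() unloads tx_dmamap when it reclaims tx_m.
	 */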
1348 txd = &sc->age_cdata.age_txdesc[prod];
1349 map = txd_last->tx_dmamap;
1350 txd_last->tx_dmamap = txd->tx_dmamap;
1351 txd->tx_dmamap = map;
1352 txd->tx_m = m;
1353
1354 /* Sync descriptors. */
1355 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1356 BUS_DMASYNC_PREWRITE);
1357 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1358 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1359
1360 return (0);
1361 }
1362
1363 static void
1364 age_txintr(struct age_softc *sc, int tpd_cons)
1365 {
1366 struct ifnet *ifp = &sc->sc_ec.ec_if;
1367 struct age_txdesc *txd;
1368 int cons, prog;
1369
1370 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1371 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1372
1373 /*
1374 * Go through our Tx list and free mbufs for those
1375 * frames which have been transmitted.
1376 */
1377 cons = sc->age_cdata.age_tx_cons;
1378 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
1379 if (sc->age_cdata.age_tx_cnt <= 0)
1380 break;
1381 prog++;
1382 ifp->if_flags &= ~IFF_OACTIVE;
1383 sc->age_cdata.age_tx_cnt--;
1384 txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear the Tx descriptors.  This isn't required, but it
		 * helps debugging in case of Tx issues.
		 */
1389 txd->tx_desc->addr = 0;
1390 txd->tx_desc->len = 0;
1391 txd->tx_desc->flags = 0;
1392
1393 if (txd->tx_m == NULL)
1394 continue;
1395 /* Reclaim transmitted mbufs. */
1396 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1397 m_freem(txd->tx_m);
1398 txd->tx_m = NULL;
1399 }
1400
1401 if (prog > 0) {
1402 sc->age_cdata.age_tx_cons = cons;
1403
1404 /*
1405 * Unarm watchdog timer only when there are no pending
1406 * Tx descriptors in queue.
1407 */
1408 if (sc->age_cdata.age_tx_cnt == 0)
1409 ifp->if_timer = 0;
1410
1411 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1412 sc->age_cdata.age_tx_ring_map->dm_mapsize,
1413 BUS_DMASYNC_PREWRITE);
1414 }
1415 }
1416
1417 /* Receive a frame. */
1418 static void
1419 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
1420 {
1421 struct ifnet *ifp = &sc->sc_ec.ec_if;
1422 struct age_rxdesc *rxd;
1423 struct rx_desc *desc;
1424 struct mbuf *mp, *m;
1425 uint32_t status, index, vtag;
1426 int count, nsegs, pktlen;
1427 int rx_cons;
1428
1429 status = le32toh(rxrd->flags);
1430 index = le32toh(rxrd->index);
1431 rx_cons = AGE_RX_CONS(index);
1432 nsegs = AGE_RX_NSEGS(index);
1433
1434 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
1435 if ((status & AGE_RRD_ERROR) != 0 &&
1436 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
1437 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
1438 /*
1439 * We want to pass the following frames to upper
1440 * layer regardless of error status of Rx return
1441 * ring.
1442 *
		 * o IP/TCP/UDP checksum is bad.
		 * o frame length and protocol specific length
		 *   do not match.
1446 */
1447 sc->age_cdata.age_rx_cons += nsegs;
1448 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1449 return;
1450 }
1451
1452 pktlen = 0;
1453 for (count = 0; count < nsegs; count++,
1454 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
1455 rxd = &sc->age_cdata.age_rxdesc[rx_cons];
1456 mp = rxd->rx_m;
1457 desc = rxd->rx_desc;
1458 /* Add a new receive buffer to the ring. */
1459 if (age_newbuf(sc, rxd, 0) != 0) {
1460 ifp->if_iqdrops++;
1461 /* Reuse Rx buffers. */
1462 if (sc->age_cdata.age_rxhead != NULL) {
1463 m_freem(sc->age_cdata.age_rxhead);
1464 AGE_RXCHAIN_RESET(sc);
1465 }
1466 break;
1467 }
1468
1469 /* The length of the first mbuf is computed last. */
1470 if (count != 0) {
1471 mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
1472 pktlen += mp->m_len;
1473 }
1474
1475 /* Chain received mbufs. */
1476 if (sc->age_cdata.age_rxhead == NULL) {
1477 sc->age_cdata.age_rxhead = mp;
1478 sc->age_cdata.age_rxtail = mp;
1479 } else {
1480 mp->m_flags &= ~M_PKTHDR;
1481 sc->age_cdata.age_rxprev_tail =
1482 sc->age_cdata.age_rxtail;
1483 sc->age_cdata.age_rxtail->m_next = mp;
1484 sc->age_cdata.age_rxtail = mp;
1485 }
1486
1487 if (count == nsegs - 1) {
1488 /*
1489 * It seems that L1 controller has no way
1490 * to tell hardware to strip CRC bytes.
1491 */
1492 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
1493 if (nsegs > 1) {
1494 /* Remove the CRC bytes in chained mbufs. */
1495 pktlen -= ETHER_CRC_LEN;
1496 if (mp->m_len <= ETHER_CRC_LEN) {
1497 sc->age_cdata.age_rxtail =
1498 sc->age_cdata.age_rxprev_tail;
1499 sc->age_cdata.age_rxtail->m_len -=
1500 (ETHER_CRC_LEN - mp->m_len);
1501 sc->age_cdata.age_rxtail->m_next = NULL;
1502 m_freem(mp);
1503 } else {
1504 mp->m_len -= ETHER_CRC_LEN;
1505 }
1506 }
1507
1508 m = sc->age_cdata.age_rxhead;
1509 m->m_flags |= M_PKTHDR;
1510 m->m_pkthdr.rcvif = ifp;
1511 m->m_pkthdr.len = sc->age_cdata.age_rxlen;
1512 /* Set the first mbuf length. */
1513 m->m_len = sc->age_cdata.age_rxlen - pktlen;
1514
1515 /*
1516 * Set checksum information.
1517 * It seems that L1 controller can compute partial
1518 * checksum. The partial checksum value can be used
1519 * to accelerate checksum computation for fragmented
1520 * TCP/UDP packets. Upper network stack already
1521 * takes advantage of the partial checksum value in
1522 * IP reassembly stage. But I'm not sure the
1523 * correctness of the partial hardware checksum
1524 * assistance due to lack of data sheet. If it is
1525 * proven to work on L1 I'll enable it.
1526 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) != 0 &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
1536 /*
1537 * Don't mark bad checksum for TCP/UDP frames
1538 * as fragmented frames may always have set
1539 * bad checksummed bit of descriptor status.
1540 */
1541 }
1542 #if NVLAN > 0
1543 /* Check for VLAN tagged frames. */
1544 if (status & AGE_RRD_VLAN) {
1545 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
1546 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
1547 continue);
1548 }
1549 #endif
1550
1551 #if NBPFILTER > 0
1552 if (ifp->if_bpf)
1553 bpf_mtap(ifp->if_bpf, m);
1554 #endif
1555 /* Pass it on. */
1556 ether_input(ifp, m);
1557
1558 /* Reset mbuf chains. */
1559 AGE_RXCHAIN_RESET(sc);
1560 }
1561 }
1562
1563 if (count != nsegs) {
1564 sc->age_cdata.age_rx_cons += nsegs;
1565 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1566 } else
1567 sc->age_cdata.age_rx_cons = rx_cons;
1568 }
1569
1570 static void
1571 age_rxintr(struct age_softc *sc, int rr_prod)
1572 {
1573 struct rx_rdesc *rxrd;
1574 int rr_cons, nsegs, pktlen, prog;
1575
1576 rr_cons = sc->age_cdata.age_rr_cons;
1577 if (rr_cons == rr_prod)
1578 return;
1579
1580 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1581 sc->age_cdata.age_rr_ring_map->dm_mapsize,
1582 BUS_DMASYNC_POSTREAD);
1583
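	/*
	 * Walk the Rx return ring from our consumer index up to the
	 * producer index reported by the chip.
	 */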
1584 for (prog = 0; rr_cons != rr_prod; prog++) {
1585 rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
1586 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
1587 if (nsegs == 0)
1588 break;
		/*
		 * Check the number of segments against the received
		 * bytes.  A non-matching value would indicate that the
		 * hardware is still trying to update Rx return
		 * descriptors.  I'm not sure whether this check is
		 * really needed.
		 */
1595 pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
1596 if (nsegs != ((pktlen + (MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN)) /
1597 (MCLBYTES - ETHER_HDR_LEN)))
1598 break;
1599
1600 /* Received a frame. */
1601 age_rxeof(sc, rxrd);
1602
1603 /* Clear return ring. */
1604 rxrd->index = 0;
1605 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
1606 }
1607
1608 if (prog > 0) {
1609 /* Update the consumer index. */
1610 sc->age_cdata.age_rr_cons = rr_cons;
1611
1612 /* Sync descriptors. */
1613 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1614 sc->age_cdata.age_rr_ring_map->dm_mapsize,
1615 BUS_DMASYNC_PREWRITE);
1616
1617 /* Notify hardware availability of new Rx buffers. */
1618 AGE_COMMIT_MBOX(sc);
1619 }
1620 }
1621
1622 static void
1623 age_tick(void *xsc)
1624 {
1625 struct age_softc *sc = xsc;
1626 struct mii_data *mii = &sc->sc_miibus;
1627 int s;
1628
1629 s = splnet();
1630 mii_tick(mii);
1631 splx(s);
1632
1633 callout_schedule(&sc->sc_tick_ch, hz);
1634 }
1635
1636 static void
1637 age_reset(struct age_softc *sc)
1638 {
1639 uint32_t reg;
1640 int i;
1641
1642 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
1643 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1644 DELAY(1);
1645 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
1646 break;
1647 }
1648 if (i == 0)
1649 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
1650
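	/* Wait for the chip's internal engines to go idle after the reset. */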
1651 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1652 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1653 break;
1654 DELAY(10);
1655 }
1656
1657 if (i == 0)
1658 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
1659 reg);
1660
1661 /* Initialize PCIe module. From Linux. */
1662 CSR_WRITE_4(sc, 0x12FC, 0x6500);
1663 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1664 }
1665
1666 static int
1667 age_init(struct ifnet *ifp)
1668 {
1669 struct age_softc *sc = ifp->if_softc;
1670 struct mii_data *mii;
1671 uint8_t eaddr[ETHER_ADDR_LEN];
1672 bus_addr_t paddr;
1673 uint32_t reg, fsize;
1674 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
1675 int error;
1676
1677 /*
1678 * Cancel any pending I/O.
1679 */
1680 age_stop(sc);
1681
1682 /*
1683 * Reset the chip to a known state.
1684 */
1685 age_reset(sc);
1686
1687 /* Initialize descriptors. */
1688 error = age_init_rx_ring(sc);
1689 if (error != 0) {
1690 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
1691 age_stop(sc);
1692 return (error);
1693 }
1694 age_init_rr_ring(sc);
1695 age_init_tx_ring(sc);
1696 age_init_cmb_block(sc);
1697 age_init_smb_block(sc);
1698
1699 /* Reprogram the station address. */
1700 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
1701 CSR_WRITE_4(sc, AGE_PAR0,
1702 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
1703 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
1704
1705 /* Set descriptor base addresses. */
1706 paddr = sc->age_rdata.age_tx_ring_paddr;
1707 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
1708 paddr = sc->age_rdata.age_rx_ring_paddr;
1709 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
1710 paddr = sc->age_rdata.age_rr_ring_paddr;
1711 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
1712 paddr = sc->age_rdata.age_tx_ring_paddr;
1713 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
1714 paddr = sc->age_rdata.age_cmb_block_paddr;
1715 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
1716 paddr = sc->age_rdata.age_smb_block_paddr;
1717 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
1718
1719 /* Set Rx/Rx return descriptor counter. */
1720 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
1721 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
1722 DESC_RRD_CNT_MASK) |
1723 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
1724
1725 /* Set Tx descriptor counter. */
1726 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
1727 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
1728
1729 /* Tell hardware that we're ready to load descriptors. */
1730 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
1731
	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer indices all share it, so
	 * it's hard to separate the Tx and Rx code paths without
	 * locking.  If the L1 hardware had separate mailbox registers
	 * for Tx and Rx consumer/producer management, we could have
	 * independent Tx/Rx handlers, and the Rx handler in turn could
	 * run without any locking.
	 */
1743 AGE_COMMIT_MBOX(sc);
1744
1745 /* Configure IPG/IFG parameters. */
1746 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
1747 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
1748 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1749 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1750 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
1751
1752 /* Set parameters for half-duplex media. */
1753 CSR_WRITE_4(sc, AGE_HDPX_CFG,
1754 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1755 HDPX_CFG_LCOL_MASK) |
1756 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1757 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1758 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1759 HDPX_CFG_ABEBT_MASK) |
1760 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1761 HDPX_CFG_JAMIPG_MASK));
1762
1763 /* Configure interrupt moderation timer. */
1764 sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
1765 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
1766 reg = CSR_READ_4(sc, AGE_MASTER_CFG);
1767 reg &= ~MASTER_MTIMER_ENB;
1768 if (AGE_USECS(sc->age_int_mod) == 0)
1769 reg &= ~MASTER_ITIMER_ENB;
1770 else
1771 reg |= MASTER_ITIMER_ENB;
1772 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
1773 if (agedebug)
1774 printf("%s: interrupt moderation is %d us.\n",
1775 device_xname(sc->sc_dev), sc->age_int_mod);
1776 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
1777
	/* Set the maximum frame size, but don't let it be less than ETHERMTU. */
1779 if (ifp->if_mtu < ETHERMTU)
1780 sc->age_max_frame_size = ETHERMTU;
1781 else
1782 sc->age_max_frame_size = ifp->if_mtu;
1783 sc->age_max_frame_size += ETHER_HDR_LEN +
1784 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1785 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1786
1787 /* Configure jumbo frame. */
1788 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1789 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1790 (((fsize / sizeof(uint64_t)) <<
1791 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1792 ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1793 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1794 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1795 RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1796
1797 /* Configure flow-control parameters. From Linux. */
1798 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1799 /*
1800 * Magic workaround for old-L1.
1801 * Don't know which hw revision requires this magic.
1802 */
1803 CSR_WRITE_4(sc, 0x12FC, 0x6500);
1804 /*
1805 * Another magic workaround for flow-control mode
1806 * change. From Linux.
1807 */
1808 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1809 }
1810 /*
1811 * TODO
1812 * Should understand pause parameter relationships between FIFO
1813 * size and number of Rx descriptors and Rx return descriptors.
1814 *
1815 * Magic parameters came from Linux.
1816 */
1817 switch (sc->age_chip_rev) {
1818 case 0x8001:
1819 case 0x9001:
1820 case 0x9002:
1821 case 0x9003:
1822 rxf_hi = AGE_RX_RING_CNT / 16;
1823 rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1824 rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1825 rrd_lo = AGE_RR_RING_CNT / 16;
1826 break;
1827 default:
1828 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1829 rxf_lo = reg / 16;
1830 if (rxf_lo < 192)
1831 rxf_lo = 192;
1832 rxf_hi = (reg * 7) / 8;
1833 if (rxf_hi < rxf_lo)
1834 rxf_hi = rxf_lo + 16;
1835 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1836 rrd_lo = reg / 8;
1837 rrd_hi = (reg * 7) / 8;
1838 if (rrd_lo < 2)
1839 rrd_lo = 2;
1840 if (rrd_hi < rrd_lo)
1841 rrd_hi = rrd_lo + 3;
1842 break;
1843 }
1844 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1845 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1846 RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1847 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1848 RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1849 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1850 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1851 RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1852 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1853 RXQ_RRD_PAUSE_THRESH_HI_MASK));
1854
1855 /* Configure RxQ. */
1856 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1857 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1858 RXQ_CFG_RD_BURST_MASK) |
1859 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1860 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1861 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1862 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1863 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1864
1865 /* Configure TxQ. */
1866 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1867 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1868 TXQ_CFG_TPD_BURST_MASK) |
1869 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1870 TXQ_CFG_TX_FIFO_BURST_MASK) |
1871 ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1872 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1873 TXQ_CFG_ENB);
1874
1875 /* Configure DMA parameters. */
1876 CSR_WRITE_4(sc, AGE_DMA_CFG,
1877 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1878 sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1879 sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1880
1881 /* Configure CMB DMA write threshold. */
1882 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1883 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1884 CMB_WR_THRESH_RRD_MASK) |
1885 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1886 CMB_WR_THRESH_TPD_MASK));
1887
1888 /* Set CMB/SMB timer and enable them. */
1889 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1890 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1891 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1892
	/* Request SMB updates every second. */
1894 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1895 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1896
	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
1901 CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1902
	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media.  The MAC is reconfigured
	 * to match the negotiated link parameters once the link comes up.
	 */
1910 CSR_WRITE_4(sc, AGE_MAC_CFG,
1911 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1912 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1913 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1914 MAC_CFG_PREAMBLE_MASK));
1915
1916 /* Set up the receive filter. */
1917 age_rxfilter(sc);
1918 age_rxvlan(sc);
1919
1920 reg = CSR_READ_4(sc, AGE_MAC_CFG);
1921 reg |= MAC_CFG_RXCSUM_ENB;
1922
	/* Acknowledge all pending interrupts and clear them. */
1924 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1925 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1926
1927 /* Finally enable Tx/Rx MAC. */
1928 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1929
1930 sc->age_flags &= ~AGE_FLAG_LINK;
1931
1932 /* Switch to the current media. */
1933 mii = &sc->sc_miibus;
1934 mii_mediachg(mii);
1935
1936 callout_schedule(&sc->sc_tick_ch, hz);
1937
1938 ifp->if_flags |= IFF_RUNNING;
1939 ifp->if_flags &= ~IFF_OACTIVE;
1940
1941 return (0);
1942 }
1943
1944 static void
1945 age_stop(struct age_softc *sc)
1946 {
1947 struct ifnet *ifp = &sc->sc_ec.ec_if;
1948 struct age_txdesc *txd;
1949 struct age_rxdesc *rxd;
1950 uint32_t reg;
1951 int i;
1952
1953 callout_stop(&sc->sc_tick_ch);
1954
1955 /*
1956 * Mark the interface down and cancel the watchdog timer.
1957 */
1958 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1959 ifp->if_timer = 0;
1960
1961 sc->age_flags &= ~AGE_FLAG_LINK;
1962
1963 /*
1964 * Disable interrupts.
1965 */
1966 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1967 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1968
1969 /* Stop CMB/SMB updates. */
1970 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1971
1972 /* Stop Rx/Tx MAC. */
1973 age_stop_rxmac(sc);
1974 age_stop_txmac(sc);
1975
1976 /* Stop DMA. */
1977 CSR_WRITE_4(sc, AGE_DMA_CFG,
1978 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1979
1980 /* Stop TxQ/RxQ. */
1981 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1982 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1983 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1984 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
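	/*
	 * Wait for the Rx/Tx MACs and the DMA engines to report idle,
	 * polling the idle status register every 10us for up to
	 * AGE_RESET_TIMEOUT iterations.
	 */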
1985 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1986 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1987 break;
1988 DELAY(10);
1989 }
	if (i == 0)
		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);
1993
1994 /* Reclaim Rx buffers that have been processed. */
1995 if (sc->age_cdata.age_rxhead != NULL)
1996 m_freem(sc->age_cdata.age_rxhead);
1997 AGE_RXCHAIN_RESET(sc);
1998
1999 /*
2000 * Free RX and TX mbufs still in the queues.
2001 */
2002 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2003 rxd = &sc->age_cdata.age_rxdesc[i];
2004 if (rxd->rx_m != NULL) {
2005 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2006 m_freem(rxd->rx_m);
2007 rxd->rx_m = NULL;
2008 }
2009 }
2010 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2011 txd = &sc->age_cdata.age_txdesc[i];
2012 if (txd->tx_m != NULL) {
2013 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2014 m_freem(txd->tx_m);
2015 txd->tx_m = NULL;
2016 }
2017 }
2018 }
2019
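/*
 * The MAC periodically DMA-writes a statistics message block (SMB) into
 * host memory and marks it with the `updated' flag.  Fold the hardware
 * counters into the driver's running totals and the ifnet counters, then
 * clear the flag so the next snapshot can be recognized.
 */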
2020 static void
2021 age_stats_update(struct age_softc *sc)
2022 {
2023 struct ifnet *ifp = &sc->sc_ec.ec_if;
2024 struct age_stats *stat;
2025 struct smb *smb;
2026
2027 stat = &sc->age_stat;
2028
2029 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2030 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2031
2032 smb = sc->age_rdata.age_smb_block;
2033 if (smb->updated == 0)
2034 return;
2035
2036 /* Rx stats. */
2037 stat->rx_frames += smb->rx_frames;
2038 stat->rx_bcast_frames += smb->rx_bcast_frames;
2039 stat->rx_mcast_frames += smb->rx_mcast_frames;
2040 stat->rx_pause_frames += smb->rx_pause_frames;
2041 stat->rx_control_frames += smb->rx_control_frames;
2042 stat->rx_crcerrs += smb->rx_crcerrs;
2043 stat->rx_lenerrs += smb->rx_lenerrs;
2044 stat->rx_bytes += smb->rx_bytes;
2045 stat->rx_runts += smb->rx_runts;
2046 stat->rx_fragments += smb->rx_fragments;
2047 stat->rx_pkts_64 += smb->rx_pkts_64;
2048 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2049 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2050 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2051 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2052 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2053 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2054 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2055 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2056 stat->rx_desc_oflows += smb->rx_desc_oflows;
2057 stat->rx_alignerrs += smb->rx_alignerrs;
2058 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2059 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2060 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2061
2062 /* Tx stats. */
2063 stat->tx_frames += smb->tx_frames;
2064 stat->tx_bcast_frames += smb->tx_bcast_frames;
2065 stat->tx_mcast_frames += smb->tx_mcast_frames;
2066 stat->tx_pause_frames += smb->tx_pause_frames;
2067 stat->tx_excess_defer += smb->tx_excess_defer;
2068 stat->tx_control_frames += smb->tx_control_frames;
2069 stat->tx_deferred += smb->tx_deferred;
2070 stat->tx_bytes += smb->tx_bytes;
2071 stat->tx_pkts_64 += smb->tx_pkts_64;
2072 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2073 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2074 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2075 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2076 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2077 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2078 stat->tx_single_colls += smb->tx_single_colls;
2079 stat->tx_multi_colls += smb->tx_multi_colls;
2080 stat->tx_late_colls += smb->tx_late_colls;
2081 stat->tx_excess_colls += smb->tx_excess_colls;
2082 stat->tx_underrun += smb->tx_underrun;
2083 stat->tx_desc_underrun += smb->tx_desc_underrun;
2084 stat->tx_lenerrs += smb->tx_lenerrs;
2085 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2086 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2087 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2088
2089 /* Update counters in ifnet. */
2090 ifp->if_opackets += smb->tx_frames;
2091
2092 ifp->if_collisions += smb->tx_single_colls +
2093 smb->tx_multi_colls + smb->tx_late_colls +
2094 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2095
2096 ifp->if_oerrors += smb->tx_excess_colls +
2097 smb->tx_late_colls + smb->tx_underrun +
2098 smb->tx_pkts_truncated;
2099
2100 ifp->if_ipackets += smb->rx_frames;
2101
2102 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2103 smb->rx_runts + smb->rx_pkts_truncated +
2104 smb->rx_fifo_oflows + smb->rx_desc_oflows +
2105 smb->rx_alignerrs;
2106
2107 /* Update done, clear. */
2108 smb->updated = 0;
2109
2110 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2111 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2112 }
2113
2114 static void
2115 age_stop_txmac(struct age_softc *sc)
2116 {
2117 uint32_t reg;
2118 int i;
2119
2120 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2121 if ((reg & MAC_CFG_TX_ENB) != 0) {
2122 reg &= ~MAC_CFG_TX_ENB;
2123 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2124 }
2125 /* Stop Tx DMA engine. */
2126 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2127 if ((reg & DMA_CFG_RD_ENB) != 0) {
2128 reg &= ~DMA_CFG_RD_ENB;
2129 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2130 }
2131 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2132 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2133 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2134 break;
2135 DELAY(10);
2136 }
	if (i == 0)
		printf("%s: stopping TxMAC timed out!\n",
		    device_xname(sc->sc_dev));
2139 }
2140
2141 static void
2142 age_stop_rxmac(struct age_softc *sc)
2143 {
2144 uint32_t reg;
2145 int i;
2146
2147 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2148 if ((reg & MAC_CFG_RX_ENB) != 0) {
2149 reg &= ~MAC_CFG_RX_ENB;
2150 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2151 }
2152 /* Stop Rx DMA engine. */
2153 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2154 if ((reg & DMA_CFG_WR_ENB) != 0) {
2155 reg &= ~DMA_CFG_WR_ENB;
2156 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2157 }
2158 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2159 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2160 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2161 break;
2162 DELAY(10);
2163 }
	if (i == 0)
		printf("%s: stopping RxMAC timed out!\n",
		    device_xname(sc->sc_dev));
2166 }
2167
2168 static void
2169 age_init_tx_ring(struct age_softc *sc)
2170 {
2171 struct age_ring_data *rd;
2172 struct age_txdesc *txd;
2173 int i;
2174
2175 sc->age_cdata.age_tx_prod = 0;
2176 sc->age_cdata.age_tx_cons = 0;
2177 sc->age_cdata.age_tx_cnt = 0;
2178
2179 rd = &sc->age_rdata;
2180 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2181 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2182 txd = &sc->age_cdata.age_txdesc[i];
2183 txd->tx_desc = &rd->age_tx_ring[i];
2184 txd->tx_m = NULL;
2185 }
2186 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2187 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2188 }
2189
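/*
 * Populate every Rx descriptor with a fresh cluster mbuf via age_newbuf()
 * and flush the ring to memory; any allocation or mapping failure is
 * reported as ENOBUFS.
 */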
2190 static int
2191 age_init_rx_ring(struct age_softc *sc)
2192 {
2193 struct age_ring_data *rd;
2194 struct age_rxdesc *rxd;
2195 int i;
2196
2197 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2198 rd = &sc->age_rdata;
2199 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2200 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2201 rxd = &sc->age_cdata.age_rxdesc[i];
2202 rxd->rx_m = NULL;
2203 rxd->rx_desc = &rd->age_rx_ring[i];
2204 if (age_newbuf(sc, rxd, 1) != 0)
2205 return (ENOBUFS);
2206 }
2207
2208 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2209 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2210
2211 return (0);
2212 }
2213
2214 static void
2215 age_init_rr_ring(struct age_softc *sc)
2216 {
2217 struct age_ring_data *rd;
2218
2219 sc->age_cdata.age_rr_cons = 0;
2220 AGE_RXCHAIN_RESET(sc);
2221
2222 rd = &sc->age_rdata;
2223 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2224 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2225 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2226 }
2227
2228 static void
2229 age_init_cmb_block(struct age_softc *sc)
2230 {
2231 struct age_ring_data *rd;
2232
2233 rd = &sc->age_rdata;
2234 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2235 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2236 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2237 }
2238
2239 static void
2240 age_init_smb_block(struct age_softc *sc)
2241 {
2242 struct age_ring_data *rd;
2243
2244 rd = &sc->age_rdata;
2245 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2246 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2247 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2248 }
2249
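/*
 * Attach a new cluster mbuf to an Rx descriptor.  The buffer is loaded
 * into the spare DMA map first and the maps are swapped only on success,
 * so the descriptor keeps its previous, still-mapped buffer if the
 * allocation or DMA load fails.
 */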
2250 static int
2251 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2252 {
2253 struct rx_desc *desc;
2254 struct mbuf *m;
2255 bus_dmamap_t map;
2256 int error;
2257
2258 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2259 if (m == NULL)
2260 return (ENOBUFS);
2261 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2262 if (!(m->m_flags & M_EXT)) {
2263 m_freem(m);
2264 return (ENOBUFS);
2265 }
2266
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * Trimming PAGE_SIZE here would leave an empty MCLBYTES cluster;
	 * the intent is the usual ETHER_ALIGN (2 byte) adjustment so the
	 * IP header ends up 32-bit aligned.
	 */
	m_adj(m, ETHER_ALIGN);
2269
2270 error = bus_dmamap_load_mbuf(sc->sc_dmat,
2271 sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2272
	if (error != 0) {
		/* Nothing was loaded into the map; just release the mbuf. */
		m_freem(m);
		if (init)
			printf("%s: can't load RX mbuf\n",
			    device_xname(sc->sc_dev));
		return (error);
	}
2287
2288 if (rxd->rx_m != NULL) {
2289 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2290 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2291 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2292 }
2293 map = rxd->rx_dmamap;
2294 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2295 sc->age_cdata.age_rx_sparemap = map;
2296 rxd->rx_m = m;
2297
2298 desc = rxd->rx_desc;
2299 desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2300 desc->len =
2301 htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2302 AGE_RD_LEN_SHIFT);
2303
2304 return (0);
2305 }
2306
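/*
 * Enable or disable hardware VLAN tag stripping to match the interface's
 * VLAN hardware-tagging capability.
 */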
2307 static void
2308 age_rxvlan(struct age_softc *sc)
2309 {
2310 uint32_t reg;
2311
2312 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2313 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2314 if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
2315 reg |= MAC_CFG_VLAN_TAG_STRIP;
2316 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2317 }
2318
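/*
 * Program the Rx filter.  Broadcast, promiscuous and all-multicast modes
 * are handled with MAC_CFG bits and an all-ones hash table; otherwise a
 * 64-bit multicast hash is built from the top six bits of each address
 * CRC (bit 31 selects MAR0 or MAR1, bits 30-26 select the bit within
 * that register).
 */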
2319 static void
2320 age_rxfilter(struct age_softc *sc)
2321 {
2322 struct ethercom *ec = &sc->sc_ec;
2323 struct ifnet *ifp = &sc->sc_ec.ec_if;
2324 struct ether_multi *enm;
2325 struct ether_multistep step;
2326 uint32_t crc;
2327 uint32_t mchash[2];
2328 uint32_t rxcfg;
2329
2330 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2331 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2332
2333 if (ifp->if_flags & IFF_BROADCAST)
2334 rxcfg |= MAC_CFG_BCAST;
2335 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2336 if (ifp->if_flags & IFF_PROMISC)
2337 rxcfg |= MAC_CFG_PROMISC;
2338 if (ifp->if_flags & IFF_ALLMULTI)
2339 rxcfg |= MAC_CFG_ALLMULTI;
2340 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
2341 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
2342 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2343 return;
2344 }
2345
2346 /* Program new filter. */
2347 memset(mchash, 0, sizeof(mchash));
2348
2349 ETHER_FIRST_MULTI(step, ec, enm);
2350 while (enm != NULL) {
2351 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2352 enm->enm_addrlo), ETHER_ADDR_LEN);
2353
2354 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2355 ETHER_NEXT_MULTI(step, enm);
2356 }
2357
2358 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2359 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2360 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2361 }
2362