/*	$NetBSD: if_age.c,v 1.5 2009/01/16 23:58:05 cegger Exp $ */
2 /* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */
3
4 /*-
5 * Copyright (c) 2008, Pyun YongHyeon <yongari (at) FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice unmodified, this list of conditions, and the following
13 * disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31 /* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.5 2009/01/16 23:58:05 cegger Exp $");
35
36 #include "bpfilter.h"
37 #include "vlan.h"
38
39 #include <sys/param.h>
40 #include <sys/proc.h>
41 #include <sys/endian.h>
42 #include <sys/systm.h>
43 #include <sys/types.h>
44 #include <sys/sockio.h>
45 #include <sys/mbuf.h>
46 #include <sys/queue.h>
47 #include <sys/kernel.h>
48 #include <sys/device.h>
49 #include <sys/callout.h>
50 #include <sys/socket.h>
51
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_ether.h>
56
57 #ifdef INET
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif
63
64 #include <net/if_types.h>
65 #include <net/if_vlanvar.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70
71 #include <sys/rnd.h>
72
73 #include <dev/mii/mii.h>
74 #include <dev/mii/miivar.h>
75
76 #include <dev/pci/pcireg.h>
77 #include <dev/pci/pcivar.h>
78 #include <dev/pci/pcidevs.h>
79
80 #include <dev/pci/if_agereg.h>
81
82 static int age_match(device_t, cfdata_t, void *);
83 static void age_attach(device_t, device_t, void *);
84 static int age_detach(device_t, int);
85
86 static bool age_suspend(device_t PMF_FN_PROTO);
87 static bool age_resume(device_t PMF_FN_PROTO);
88
89 static int age_miibus_readreg(device_t, int, int);
90 static void age_miibus_writereg(device_t, int, int, int);
91 static void age_miibus_statchg(device_t);
92
93 static int age_init(struct ifnet *);
94 static int age_ioctl(struct ifnet *, u_long, void *);
95 static void age_start(struct ifnet *);
96 static void age_watchdog(struct ifnet *);
97 static void age_mediastatus(struct ifnet *, struct ifmediareq *);
98 static int age_mediachange(struct ifnet *);
99
100 static int age_intr(void *);
101 static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t, uint32_t *);
102 static int age_dma_alloc(struct age_softc *);
103 static void age_dma_free(struct age_softc *);
104 static void age_get_macaddr(struct age_softc *, uint8_t[]);
105 static void age_phy_reset(struct age_softc *);
106
107 static int age_encap(struct age_softc *, struct mbuf **);
108 static void age_init_tx_ring(struct age_softc *);
109 static int age_init_rx_ring(struct age_softc *);
110 static void age_init_rr_ring(struct age_softc *);
111 static void age_init_cmb_block(struct age_softc *);
112 static void age_init_smb_block(struct age_softc *);
113 static int age_newbuf(struct age_softc *, struct age_rxdesc *, int);
114 static void age_mac_config(struct age_softc *);
115 static void age_txintr(struct age_softc *, int);
116 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
117 static void age_rxintr(struct age_softc *, int);
118 static void age_tick(void *);
119 static void age_reset(struct age_softc *);
120 static void age_stop(struct age_softc *);
121 static void age_stats_update(struct age_softc *);
122 static void age_stop_txmac(struct age_softc *);
123 static void age_stop_rxmac(struct age_softc *);
124 static void age_rxvlan(struct age_softc *sc);
125 static void age_rxfilter(struct age_softc *);
126
127 CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
128 age_match, age_attach, age_detach, NULL);
129
130 int agedebug = 0;
131 #define DPRINTF(x) do { if (agedebug) printf x; } while (0)
132
133 #define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)
134
135 static int
136 age_match(device_t dev, cfdata_t match, void *aux)
137 {
138 struct pci_attach_args *pa = aux;
139
140 return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
141 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
142 }
143
144 static void
145 age_attach(device_t parent, device_t self, void *aux)
146 {
147 struct age_softc *sc = device_private(self);
148 struct pci_attach_args *pa = aux;
149 pci_intr_handle_t ih;
150 const char *intrstr;
151 struct ifnet *ifp = &sc->sc_ec.ec_if;
152 pcireg_t memtype;
153 int error = 0;
154
155 aprint_naive("\n");
156 aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");
157
158 sc->sc_dev = self;
159 sc->sc_dmat = pa->pa_dmat;
160 sc->sc_pct = pa->pa_pc;
161 sc->sc_pcitag = pa->pa_tag;
162
163 /*
164 * Allocate IO memory
165 */
166 memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
167 switch (memtype) {
168 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
169 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
170 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
171 break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		return;
175 }
176
177 if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
178 &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
179 aprint_error_dev(self, "could not map mem space\n");
180 return;
181 }
182
183 if (pci_intr_map(pa, &ih) != 0) {
184 aprint_error_dev(self, "could not map interrupt\n");
185 return;
186 }
187
188 /*
189 * Allocate IRQ
190 */
191 intrstr = pci_intr_string(sc->sc_pct, ih);
192 sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
193 age_intr, sc);
194 if (sc->sc_irq_handle == NULL) {
195 aprint_error_dev(self, "could not establish interrupt");
196 if (intrstr != NULL)
197 aprint_error(" at %s", intrstr);
198 aprint_error("\n");
199 return;
200 }
	aprint_normal_dev(self, "%s\n", intrstr);
202
203 /* Set PHY address. */
204 sc->age_phyaddr = AGE_PHY_ADDR;
205
206 /* Reset PHY. */
207 age_phy_reset(sc);
208
209 /* Reset the ethernet controller. */
210 age_reset(sc);
211
212 /* Get PCI and chip id/revision. */
213 sc->age_rev = PCI_REVISION(pa->pa_class);
214 sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
215 MASTER_CHIP_REV_SHIFT;
216
217 aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
218 aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);
219
220 if (agedebug) {
221 aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
222 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
223 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
224 }
225
226 /* Set max allowable DMA size. */
227 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
228 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
229
	/* Allocate DMA resources. */
231 error = age_dma_alloc(sc);
232 if (error)
233 goto fail;
234
235 callout_init(&sc->sc_tick_ch, 0);
236 callout_setfunc(&sc->sc_tick_ch, age_tick, sc);
237
238 /* Load station address. */
239 age_get_macaddr(sc, sc->sc_enaddr);
240
241 aprint_normal_dev(self, "Ethernet address %s\n",
242 ether_sprintf(sc->sc_enaddr));
243
244 ifp->if_softc = sc;
245 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
246 ifp->if_init = age_init;
247 ifp->if_ioctl = age_ioctl;
248 ifp->if_start = age_start;
249 ifp->if_watchdog = age_watchdog;
250 ifp->if_baudrate = IF_Gbps(1);
251 IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
252 IFQ_SET_READY(&ifp->if_snd);
253 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
254
255 sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
256
257 #ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
				IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
				IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
261 #endif
262
263 #if NVLAN > 0
264 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
265 #endif
266
267 /* Set up MII bus. */
268 sc->sc_miibus.mii_ifp = ifp;
269 sc->sc_miibus.mii_readreg = age_miibus_readreg;
270 sc->sc_miibus.mii_writereg = age_miibus_writereg;
271 sc->sc_miibus.mii_statchg = age_miibus_statchg;
272
273 ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
274 age_mediastatus);
275 mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
276 MII_OFFSET_ANY, 0);
277
278 if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
279 aprint_error_dev(self, "no PHY found!\n");
280 ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
281 0, NULL);
282 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
283 } else
284 ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
285
286 if_attach(ifp);
287 ether_ifattach(ifp, sc->sc_enaddr);
288
289 if (!pmf_device_register(self, age_suspend, age_resume))
290 aprint_error_dev(self, "couldn't establish power handler\n");
291 else
292 pmf_class_network_register(self, ifp);
293
294 return;
295 fail:
296 age_detach(sc->sc_dev, 0);
297 }
298
299 static int
300 age_detach(device_t self, int flags)
301 {
302 struct age_softc *sc = device_private(self);
303 struct ifnet *ifp = &sc->sc_ec.ec_if;
304 int s;
305
306 s = splnet();
307 age_stop(sc);
308 splx(s);
309
310 mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
311
312 /* Delete all remaining media. */
313 ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
314
315 ether_ifdetach(ifp);
316 if_detach(ifp);
317 age_dma_free(sc);
318
319 if (sc->sc_irq_handle != NULL) {
320 pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
321 sc->sc_irq_handle = NULL;
322 }
323
324 return (0);
325 }
326
327 /*
328 * Read a PHY register on the MII of the L1.
329 */
330 static int
331 age_miibus_readreg(struct device *dev, int phy, int reg)
332 {
333 struct age_softc *sc = device_private(dev);
334 uint32_t v;
335 int i;
336
337 if (phy != sc->age_phyaddr)
338 return (0);
339
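	/*
	 * Issue the read command through the MDIO register and poll
	 * until the controller clears the EXECUTE/BUSY bits; the PHY
	 * register contents are then available in the data field.
	 */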
340 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
341 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
342 for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
343 DELAY(1);
344 v = CSR_READ_4(sc, AGE_MDIO);
345 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
346 break;
347 }
348
349 if (i == 0) {
350 printf("%s: phy read timeout: phy %d, reg %d\n",
351 device_xname(sc->sc_dev), phy, reg);
352 return (0);
353 }
354
355 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
356 }
357
358 /*
359 * Write a PHY register on the MII of the L1.
360 */
361 static void
362 age_miibus_writereg(struct device *dev, int phy, int reg, int val)
363 {
364 struct age_softc *sc = device_private(dev);
365 uint32_t v;
366 int i;
367
368 if (phy != sc->age_phyaddr)
369 return;
370
371 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
372 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
373 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
374
375 for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
376 DELAY(1);
377 v = CSR_READ_4(sc, AGE_MDIO);
378 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
379 break;
380 }
381
382 if (i == 0) {
383 printf("%s: phy write timeout: phy %d, reg %d\n",
384 device_xname(sc->sc_dev), phy, reg);
385 }
386 }
387
388 /*
389 * Callback from MII layer when media changes.
390 */
391 static void
392 age_miibus_statchg(device_t dev)
393 {
394 struct age_softc *sc = device_private(dev);
395 struct ifnet *ifp = &sc->sc_ec.ec_if;
396 struct mii_data *mii;
397
398 if ((ifp->if_flags & IFF_RUNNING) == 0)
399 return;
400
401 mii = &sc->sc_miibus;
402
403 sc->age_flags &= ~AGE_FLAG_LINK;
404 if ((mii->mii_media_status & IFM_AVALID) != 0) {
405 switch (IFM_SUBTYPE(mii->mii_media_active)) {
406 case IFM_10_T:
407 case IFM_100_TX:
408 case IFM_1000_T:
409 sc->age_flags |= AGE_FLAG_LINK;
410 break;
411 default:
412 break;
413 }
414 }
415
416 /* Stop Rx/Tx MACs. */
417 age_stop_rxmac(sc);
418 age_stop_txmac(sc);
419
420 /* Program MACs with resolved speed/duplex/flow-control. */
421 if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
422 uint32_t reg;
423
424 age_mac_config(sc);
425 reg = CSR_READ_4(sc, AGE_MAC_CFG);
426 /* Restart DMA engine and Tx/Rx MAC. */
427 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
428 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
429 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
430 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
431 }
432 }
433
434 /*
435 * Get the current interface media status.
436 */
437 static void
438 age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
439 {
440 struct age_softc *sc = ifp->if_softc;
441 struct mii_data *mii = &sc->sc_miibus;
442
443 mii_pollstat(mii);
444 ifmr->ifm_status = mii->mii_media_status;
445 ifmr->ifm_active = mii->mii_media_active;
446 }
447
448 /*
449 * Set hardware to newly-selected media.
450 */
451 static int
452 age_mediachange(struct ifnet *ifp)
453 {
454 struct age_softc *sc = ifp->if_softc;
455 struct mii_data *mii = &sc->sc_miibus;
456 int error;
457
458 if (mii->mii_instance != 0) {
459 struct mii_softc *miisc;
460
461 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
462 mii_phy_reset(miisc);
463 }
464 error = mii_mediachg(mii);
465
466 return (error);
467 }
468
469 static int
470 age_intr(void *arg)
471 {
472 struct age_softc *sc = arg;
473 struct ifnet *ifp = &sc->sc_ec.ec_if;
474 struct cmb *cmb;
475 uint32_t status;
476
477 status = CSR_READ_4(sc, AGE_INTR_STATUS);
478 if (status == 0 || (status & AGE_INTRS) == 0)
479 return (0);
480
481 /* Disable interrupts. */
482 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
483
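	/*
	 * The interrupt status and the current Tx/Rx ring indices are
	 * also delivered through the coalescing message block (CMB) in
	 * host memory; sync the CMB and pick the indices up from there.
	 */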
484 cmb = sc->age_rdata.age_cmb_block;
485
486 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
487 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
488 status = le32toh(cmb->intr_status);
489 if ((status & AGE_INTRS) == 0)
490 goto back;
491
492 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
493 TPD_CONS_SHIFT;
494 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
495 RRD_PROD_SHIFT;
496
497 /* Let hardware know CMB was served. */
498 cmb->intr_status = 0;
499 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
500 sc->age_cdata.age_cmb_block_map->dm_mapsize,
501 BUS_DMASYNC_PREWRITE);
502
503 if (ifp->if_flags & IFF_RUNNING) {
504 if (status & INTR_CMB_RX)
505 age_rxintr(sc, sc->age_rr_prod);
506
507 if (status & INTR_CMB_TX)
508 age_txintr(sc, sc->age_tpd_cons);
509
510 if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
511 if (status & INTR_DMA_RD_TO_RST)
512 printf("%s: DMA read error! -- resetting\n",
513 device_xname(sc->sc_dev));
514 if (status & INTR_DMA_WR_TO_RST)
515 printf("%s: DMA write error! -- resetting\n",
516 device_xname(sc->sc_dev));
517 age_init(ifp);
518 }
519
520 if (!IFQ_IS_EMPTY(&ifp->if_snd))
521 age_start(ifp);
522
523 if (status & INTR_SMB)
524 age_stats_update(sc);
525 }
526
527 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
528 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
529 sc->age_cdata.age_cmb_block_map->dm_mapsize,
530 BUS_DMASYNC_POSTREAD);
531
532 back:
533 /* Re-enable interrupts. */
534 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
535
536 return (1);
537 }
538
539 static int
540 age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
541 uint32_t *word)
542 {
543 int i;
544 pcireg_t rv;
545
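	/*
	 * Start a VPD read by writing the address with the flag bit
	 * clear, then poll until the hardware sets the flag to signal
	 * that the data register holds the requested word.
	 */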
546 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_VPD_ADDRESS(vpdc),
547 offset << PCI_VPD_ADDRESS_SHIFT);
548 for (i = AGE_TIMEOUT; i > 0; i--) {
549 DELAY(10);
550 rv = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
551 PCI_VPD_ADDRESS(vpdc));
552 if ((rv & PCI_VPD_OPFLAG) == PCI_VPD_OPFLAG)
553 break;
554 }
555 if (i == 0) {
556 printf("%s: VPD read timeout!\n", device_xname(sc->sc_dev));
557 *word = 0;
558 return ETIMEDOUT;
559 }
560
561 *word = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_VPD_DATAREG(vpdc));
562 return 0;
563 }
564
565 static void
566 age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
567 {
568 uint32_t ea[2], off, reg, word;
569 int vpd_error, match, vpdc;
570
571 reg = CSR_READ_4(sc, AGE_SPI_CTRL);
572 if ((reg & SPI_VPD_ENB) != 0) {
573 /* Get VPD stored in TWSI EEPROM. */
574 reg &= ~SPI_VPD_ENB;
575 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
576 }
577
578 vpd_error = 0;
579 ea[0] = ea[1] = 0;
580 if ((vpd_error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
581 PCI_CAP_VPD, &vpdc, NULL))) {
582 /*
		 * A PCI VPD capability exists, but it does not appear to be
		 * in the standard form described by the PCI VPD
		 * specification, so the driver cannot use
		 * pci_get_vpd_readonly(9) with the keyword 'NA'.
		 * Instead, search the VPD data starting at address 0x0100.
		 * That data is used to initialize the AGE_PAR0 and
		 * AGE_PAR1 registers, along with other PCI configuration
		 * registers.
591 */
592 word = 0;
593 match = 0;
594 reg = 0;
595 for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
596 off += sizeof(uint32_t)) {
597 vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
598 if (vpd_error != 0)
599 break;
600 if (match != 0) {
601 switch (reg) {
602 case AGE_PAR0:
603 ea[0] = word;
604 break;
605 case AGE_PAR1:
606 ea[1] = word;
607 break;
608 default:
609 break;
610 }
611 match = 0;
612 } else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
613 match = 1;
614 reg = word >> 16;
615 } else
616 break;
617 }
618 if (off >= AGE_VPD_REG_CONF_END)
619 vpd_error = ENOENT;
620 if (vpd_error == 0) {
621 /*
			 * Don't blindly trust the ethernet address obtained
			 * from VPD; check whether it is a valid one.
			 * Otherwise fall back to reading the PAR
			 * registers.
626 */
627 ea[1] &= 0xFFFF;
628 if ((ea[0] == 0 && ea[1] == 0) ||
629 (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
630 if (agedebug)
631 printf("%s: invalid ethernet address "
632 "returned from VPD.\n",
633 device_xname(sc->sc_dev));
634 vpd_error = EINVAL;
635 }
636 }
637 if (vpd_error != 0 && (agedebug))
638 printf("%s: VPD access failure!\n",
639 device_xname(sc->sc_dev));
640 } else {
641 if (agedebug)
642 printf("%s: PCI VPD capability not found!\n",
643 device_xname(sc->sc_dev));
644 }
645
646 /*
	 * It seems that the L1 also provides a way to extract the ethernet
	 * address via the SPI flash interface. Because SPI flash devices
	 * from different vendors vary in the instruction codes they use
	 * for the read-ID instruction, it is very hard to issue such an
	 * instruction without detailed information about the flash part
	 * used on the ethernet controller. To keep the code simple, just
	 * read the AGE_PAR0/AGE_PAR1 registers to get the ethernet
	 * address, which is supposed to have been set by hardware during
	 * power-on reset.
656 */
657 if (vpd_error != 0) {
658 /*
659 * VPD is mapped to SPI flash memory or BIOS set it.
660 */
661 ea[0] = CSR_READ_4(sc, AGE_PAR0);
662 ea[1] = CSR_READ_4(sc, AGE_PAR1);
663 }
664
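	/*
	 * PAR1 holds the two most significant bytes of the station
	 * address and PAR0 the remaining four; assemble them with the
	 * most significant byte first.
	 */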
665 ea[1] &= 0xFFFF;
666 eaddr[0] = (ea[1] >> 8) & 0xFF;
667 eaddr[1] = (ea[1] >> 0) & 0xFF;
668 eaddr[2] = (ea[0] >> 24) & 0xFF;
669 eaddr[3] = (ea[0] >> 16) & 0xFF;
670 eaddr[4] = (ea[0] >> 8) & 0xFF;
671 eaddr[5] = (ea[0] >> 0) & 0xFF;
672 }
673
674 static void
675 age_phy_reset(struct age_softc *sc)
676 {
677 /* Reset PHY. */
678 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
679 DELAY(1000);
680 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
681 DELAY(1000);
682 }
683
684 static int
685 age_dma_alloc(struct age_softc *sc)
686 {
687 struct age_txdesc *txd;
688 struct age_rxdesc *rxd;
689 int nsegs, error, i;
690
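	/*
	 * Each ring/block below goes through the same bus_dma(9)
	 * sequence: create a map, allocate DMA-safe memory, map it
	 * into KVA, and load it into the map to obtain the physical
	 * address that is later programmed into the chip.
	 */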
691 /*
	 * Create DMA resources for the Tx ring
693 */
694 error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
695 AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
696 if (error)
697 return (ENOBUFS);
698
699 /* Allocate DMA'able memory for TX ring */
700 error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
701 PAGE_SIZE, 0, &sc->age_rdata.age_tx_ring_seg, 1,
702 &nsegs, BUS_DMA_WAITOK);
703 if (error) {
704 printf("%s: could not allocate DMA'able memory for Tx ring.\n",
705 device_xname(sc->sc_dev));
706 return error;
707 }
708
709 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
710 nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
711 BUS_DMA_NOWAIT);
712 if (error)
713 return (ENOBUFS);
714
715 memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);
716
717 /* Load the DMA map for Tx ring. */
718 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
719 sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
720 if (error) {
721 printf("%s: could not load DMA'able memory for Tx ring.\n",
722 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
725 return error;
726 }
727
728 sc->age_rdata.age_tx_ring_paddr =
729 sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;
730
731 /*
	 * Create DMA resources for the Rx ring
733 */
734 error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
735 AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
736 if (error)
737 return (ENOBUFS);
738
739 /* Allocate DMA'able memory for RX ring */
740 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
741 PAGE_SIZE, 0, &sc->age_rdata.age_rx_ring_seg, 1,
742 &nsegs, BUS_DMA_WAITOK);
743 if (error) {
744 printf("%s: could not allocate DMA'able memory for Rx ring.\n",
745 device_xname(sc->sc_dev));
746 return error;
747 }
748
749 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
750 nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
751 BUS_DMA_NOWAIT);
752 if (error)
753 return (ENOBUFS);
754
755 memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);
756
757 /* Load the DMA map for Rx ring. */
758 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
759 sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
760 if (error) {
761 printf("%s: could not load DMA'able memory for Rx ring.\n",
762 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
765 return error;
766 }
767
768 sc->age_rdata.age_rx_ring_paddr =
769 sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;
770
771 /*
	 * Create DMA resources for the Rx return ring
773 */
774 error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
775 AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
776 if (error)
777 return (ENOBUFS);
778
779 /* Allocate DMA'able memory for RX return ring */
780 error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
781 PAGE_SIZE, 0, &sc->age_rdata.age_rr_ring_seg, 1,
782 &nsegs, BUS_DMA_WAITOK);
783 if (error) {
784 printf("%s: could not allocate DMA'able memory for Rx "
785 "return ring.\n", device_xname(sc->sc_dev));
786 return error;
787 }
788
789 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
790 nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
791 BUS_DMA_NOWAIT);
792 if (error)
793 return (ENOBUFS);
794
795 memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);
796
797 /* Load the DMA map for Rx return ring. */
798 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
799 sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
800 if (error) {
801 printf("%s: could not load DMA'able memory for Rx return ring."
802 "\n", device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
805 return error;
806 }
807
808 sc->age_rdata.age_rr_ring_paddr =
809 sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;
810
811 /*
	 * Create DMA resources for the CMB block
813 */
814 error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
815 AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
816 &sc->age_cdata.age_cmb_block_map);
817 if (error)
818 return (ENOBUFS);
819
820 /* Allocate DMA'able memory for CMB block */
821 error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
822 PAGE_SIZE, 0, &sc->age_rdata.age_cmb_block_seg, 1,
823 &nsegs, BUS_DMA_WAITOK);
824 if (error) {
825 printf("%s: could not allocate DMA'able memory for "
826 "CMB block\n", device_xname(sc->sc_dev));
827 return error;
828 }
829
830 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
831 nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
832 BUS_DMA_NOWAIT);
833 if (error)
834 return (ENOBUFS);
835
836 memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
837
838 /* Load the DMA map for CMB block. */
839 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
840 sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
841 BUS_DMA_WAITOK);
842 if (error) {
843 printf("%s: could not load DMA'able memory for CMB block\n",
844 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
847 return error;
848 }
849
850 sc->age_rdata.age_cmb_block_paddr =
851 sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;
852
853 /*
	 * Create DMA resources for the SMB block
855 */
856 error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
857 AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
858 &sc->age_cdata.age_smb_block_map);
859 if (error)
860 return (ENOBUFS);
861
862 /* Allocate DMA'able memory for SMB block */
863 error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
864 PAGE_SIZE, 0, &sc->age_rdata.age_smb_block_seg, 1,
865 &nsegs, BUS_DMA_WAITOK);
866 if (error) {
867 printf("%s: could not allocate DMA'able memory for "
868 "SMB block\n", device_xname(sc->sc_dev));
869 return error;
870 }
871
872 error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
873 nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
874 BUS_DMA_NOWAIT);
875 if (error)
876 return (ENOBUFS);
877
878 memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);
879
880 /* Load the DMA map for SMB block */
881 error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
882 sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
883 BUS_DMA_WAITOK);
884 if (error) {
885 printf("%s: could not load DMA'able memory for SMB block\n",
886 device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
889 return error;
890 }
891
892 sc->age_rdata.age_smb_block_paddr =
893 sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;
894
895 /* Create DMA maps for Tx buffers. */
896 for (i = 0; i < AGE_TX_RING_CNT; i++) {
897 txd = &sc->age_cdata.age_txdesc[i];
898 txd->tx_m = NULL;
899 txd->tx_dmamap = NULL;
900 error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
901 AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
902 &txd->tx_dmamap);
903 if (error) {
904 printf("%s: could not create Tx dmamap.\n",
905 device_xname(sc->sc_dev));
906 return error;
907 }
908 }
909
910 /* Create DMA maps for Rx buffers. */
911 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
912 BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
913 if (error) {
914 printf("%s: could not create spare Rx dmamap.\n",
915 device_xname(sc->sc_dev));
916 return error;
917 }
918 for (i = 0; i < AGE_RX_RING_CNT; i++) {
919 rxd = &sc->age_cdata.age_rxdesc[i];
920 rxd->rx_m = NULL;
921 rxd->rx_dmamap = NULL;
922 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
923 MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
924 if (error) {
925 printf("%s: could not create Rx dmamap.\n",
926 device_xname(sc->sc_dev));
927 return error;
928 }
929 }
930
931 return (0);
932 }
933
934 static void
935 age_dma_free(struct age_softc *sc)
936 {
937 struct age_txdesc *txd;
938 struct age_rxdesc *rxd;
939 int i;
940
941 /* Tx buffers */
942 for (i = 0; i < AGE_TX_RING_CNT; i++) {
943 txd = &sc->age_cdata.age_txdesc[i];
944 if (txd->tx_dmamap != NULL) {
945 bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
946 txd->tx_dmamap = NULL;
947 }
948 }
949 /* Rx buffers */
950 for (i = 0; i < AGE_RX_RING_CNT; i++) {
951 rxd = &sc->age_cdata.age_rxdesc[i];
952 if (rxd->rx_dmamap != NULL) {
953 bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
954 rxd->rx_dmamap = NULL;
955 }
956 }
957 if (sc->age_cdata.age_rx_sparemap != NULL) {
958 bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
959 sc->age_cdata.age_rx_sparemap = NULL;
960 }
961
962 /* Tx ring. */
963 if (sc->age_cdata.age_tx_ring_map != NULL)
964 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
965 if (sc->age_cdata.age_tx_ring_map != NULL &&
966 sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
969 sc->age_rdata.age_tx_ring = NULL;
970 sc->age_cdata.age_tx_ring_map = NULL;
971
972 /* Rx ring. */
973 if (sc->age_cdata.age_rx_ring_map != NULL)
974 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
975 if (sc->age_cdata.age_rx_ring_map != NULL &&
976 sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
979 sc->age_rdata.age_rx_ring = NULL;
980 sc->age_cdata.age_rx_ring_map = NULL;
981
982 /* Rx return ring. */
983 if (sc->age_cdata.age_rr_ring_map != NULL)
984 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
985 if (sc->age_cdata.age_rr_ring_map != NULL &&
986 sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
989 sc->age_rdata.age_rr_ring = NULL;
990 sc->age_cdata.age_rr_ring_map = NULL;
991
992 /* CMB block */
993 if (sc->age_cdata.age_cmb_block_map != NULL)
994 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
995 if (sc->age_cdata.age_cmb_block_map != NULL &&
996 sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
999 sc->age_rdata.age_cmb_block = NULL;
1000 sc->age_cdata.age_cmb_block_map = NULL;
1001
1002 /* SMB block */
1003 if (sc->age_cdata.age_smb_block_map != NULL)
1004 bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
1005 if (sc->age_cdata.age_smb_block_map != NULL &&
1006 sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
1009 }
1010
1011 static void
1012 age_start(struct ifnet *ifp)
1013 {
1014 struct age_softc *sc = ifp->if_softc;
1015 struct mbuf *m_head;
1016 int enq;
1017
1018 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1019 return;
1020
1021 enq = 0;
1022 for (;;) {
1023 IFQ_DEQUEUE(&ifp->if_snd, m_head);
1024 if (m_head == NULL)
1025 break;
1026
1027 /*
1028 * Pack the data into the transmit ring. If we
1029 * don't have room, set the OACTIVE flag and wait
1030 * for the NIC to drain the ring.
1031 */
1032 if (age_encap(sc, &m_head)) {
1033 if (m_head == NULL)
1034 break;
1035 ifp->if_flags |= IFF_OACTIVE;
1036 break;
1037 }
1038 enq = 1;
1039
1040 #if NBPFILTER > 0
1041 /*
1042 * If there's a BPF listener, bounce a copy of this frame
1043 * to him.
1044 */
1045 if (ifp->if_bpf != NULL)
1046 bpf_mtap(ifp->if_bpf, m_head);
1047 #endif
1048 }
1049
1050 if (enq) {
1051 /* Update mbox. */
1052 AGE_COMMIT_MBOX(sc);
1053 /* Set a timeout in case the chip goes out to lunch. */
1054 ifp->if_timer = AGE_TX_TIMEOUT;
1055 }
1056 }
1057
1058 static void
1059 age_watchdog(struct ifnet *ifp)
1060 {
1061 struct age_softc *sc = ifp->if_softc;
1062
1063 if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
1064 printf("%s: watchdog timeout (missed link)\n",
1065 device_xname(sc->sc_dev));
1066 ifp->if_oerrors++;
1067 age_init(ifp);
1068 return;
1069 }
1070
1071 if (sc->age_cdata.age_tx_cnt == 0) {
1072 printf("%s: watchdog timeout (missed Tx interrupts) "
1073 "-- recovering\n", device_xname(sc->sc_dev));
1074 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1075 age_start(ifp);
1076 return;
1077 }
1078
1079 printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1080 ifp->if_oerrors++;
1081 age_init(ifp);
1082
1083 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1084 age_start(ifp);
1085 }
1086
1087 static int
1088 age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1089 {
1090 struct age_softc *sc = ifp->if_softc;
1091 struct mii_data *mii = &sc->sc_miibus;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;
1094
1095 s = splnet();
1096
1097 switch (cmd) {
1098 case SIOCSIFADDR:
1099 ifp->if_flags |= IFF_UP;
1100 if (!(ifp->if_flags & IFF_RUNNING))
1101 age_init(ifp);
1102 #ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
1105 #endif
1106 break;
1107
1108 case SIOCSIFFLAGS:
1109 error = ifioctl_common(ifp, cmd, data);
1110 if (error)
1111 break;
1112 if (ifp->if_flags & IFF_UP) {
1113 if (ifp->if_flags & IFF_RUNNING)
1114 age_rxfilter(sc);
1115 else
1116 age_init(ifp);
1117 } else {
1118 if (ifp->if_flags & IFF_RUNNING)
1119 age_stop(sc);
1120 }
1121 sc->age_if_flags = ifp->if_flags;
1122 break;
1123
1124 case SIOCADDMULTI:
1125 case SIOCDELMULTI:
1126 error = ether_ioctl(ifp, cmd, data);
1127 break;
1128
1129 case SIOCSIFMEDIA:
1130 case SIOCGIFMEDIA:
1131 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1132 break;
1133 default:
1134 error = ether_ioctl(ifp, cmd, data);
1135 break;
1136 }
1137
1138 if (error == ENETRESET) {
1139 if (ifp->if_flags & IFF_RUNNING)
1140 age_rxfilter(sc);
1141 error = 0;
1142 }
1143
1144 splx(s);
1145 return (error);
1146 }
1147
1148 static void
1149 age_mac_config(struct age_softc *sc)
1150 {
1151 struct mii_data *mii;
1152 uint32_t reg;
1153
1154 mii = &sc->sc_miibus;
1155
1156 reg = CSR_READ_4(sc, AGE_MAC_CFG);
1157 reg &= ~MAC_CFG_FULL_DUPLEX;
1158 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
1159 reg &= ~MAC_CFG_SPEED_MASK;
1160
1161 /* Reprogram MAC with resolved speed/duplex. */
1162 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1163 case IFM_10_T:
1164 case IFM_100_TX:
1165 reg |= MAC_CFG_SPEED_10_100;
1166 break;
1167 case IFM_1000_T:
1168 reg |= MAC_CFG_SPEED_1000;
1169 break;
1170 }
1171 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1172 reg |= MAC_CFG_FULL_DUPLEX;
1173 #ifdef notyet
1174 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1175 reg |= MAC_CFG_TX_FC;
1176 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1177 reg |= MAC_CFG_RX_FC;
1178 #endif
1179 }
1180
1181 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
1182 }
1183
1184 static bool
1185 age_suspend(device_t dv PMF_FN_ARGS)
1186 {
	struct age_softc *sc = device_private(dv);
	pcireg_t pmcsr;
	int pmc;

	/* XXXcegger Do we have Wake-On-LAN ? */

	/*
	 * Without WOL support, make sure the device cannot assert PME:
	 * locate the power management capability and clear the PME
	 * status/enable bits in its PMCSR register.
	 */
	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmc, NULL) != 0) {
		pmcsr = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
		    pmc + PCI_PMCSR);
		pmcsr &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    pmc + PCI_PMCSR, pmcsr);
	}
1198
1199 return true;
1200 }
1201
1202 static bool
1203 age_resume(device_t dv PMF_FN_ARGS)
1204 {
1205 struct age_softc *sc = device_private(dv);
1206 uint16_t cmd;
1207
1208 /*
1209 * Clear INTx emulation disable for hardware that
1210 * is set in resume event. From Linux.
1211 */
1212 cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
1213 if ((cmd & 0x0400) != 0) {
1214 cmd &= ~0x0400;
1215 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
1216 PCI_COMMAND_STATUS_REG, cmd);
1217 }
1218
1219 return true;
1220 }
1221
1222 static int
1223 age_encap(struct age_softc *sc, struct mbuf **m_head)
1224 {
1225 struct age_txdesc *txd, *txd_last;
1226 struct tx_desc *desc;
1227 struct mbuf *m;
1228 bus_dmamap_t map;
1229 uint32_t cflags, poff, vtag;
1230 int error, i, nsegs, prod;
1231 struct m_tag *mtag;
1232
1233 m = *m_head;
1234 cflags = vtag = 0;
1235 poff = 0;
1236
1237 prod = sc->age_cdata.age_tx_prod;
1238 txd = &sc->age_cdata.age_txdesc[prod];
1239 txd_last = txd;
1240 map = txd->tx_dmamap;
1241
1242 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1243
1244 if (error != 0) {
1245 bus_dmamap_unload(sc->sc_dmat, map);
1246 error = EFBIG;
1247 }
1248 if (error == EFBIG) {
1249 error = 0;
1250
1251 MGETHDR(m, M_DONTWAIT, MT_DATA);
1252 if (m == NULL) {
1253 printf("%s: can't defrag TX mbuf\n",
1254 device_xname(sc->sc_dev));
1255 m_freem(*m_head);
1256 *m_head = NULL;
1257 return (ENOBUFS);
1258 }
1259
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}

		/*
		 * Copy the original chain into the new cluster mbuf.
		 * Note that packet header m_tags are not carried over.
		 */
		m->m_len = m->m_pkthdr.len = (*m_head)->m_pkthdr.len;
		m->m_pkthdr.csum_flags = (*m_head)->m_pkthdr.csum_flags;
		m_copydata(*m_head, 0, (*m_head)->m_pkthdr.len,
		    mtod(m, void *));
		m_freem(*m_head);
		*m_head = m;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
1282 }
1283 } else if (error) {
1284 printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1285 return (error);
1286 }
1287
1288 nsegs = map->dm_nsegs;
1289
1290 if (nsegs == 0) {
1291 m_freem(*m_head);
1292 *m_head = NULL;
1293 return (EIO);
1294 }
1295
1296 /* Check descriptor overrun. */
1297 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
1298 bus_dmamap_unload(sc->sc_dmat, map);
1299 return (ENOBUFS);
1300 }
1301
1302 m = *m_head;
1303 /* Configure Tx IP/TCP/UDP checksum offload. */
1304 if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
1305 cflags |= AGE_TD_CSUM;
1306 if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
1307 cflags |= AGE_TD_TCPCSUM;
1308 if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
1309 cflags |= AGE_TD_UDPCSUM;
1310 /* Set checksum start offset. */
1311 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
1312 }
1313
1314 #if NVLAN > 0
1315 /* Configure VLAN hardware tag insertion. */
1316 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
1317 vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
1318 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
1319 cflags |= AGE_TD_INSERT_VLAN_TAG;
1320 }
1321 #endif
1322
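	/*
	 * Build one Tx descriptor per DMA segment; every descriptor of
	 * the frame carries the same checksum/VLAN flags, and the EOP
	 * bit is set on the last one below.
	 */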
1323 desc = NULL;
1324 for (i = 0; i < nsegs; i++) {
1325 desc = &sc->age_rdata.age_tx_ring[prod];
1326 desc->addr = htole64(map->dm_segs[i].ds_addr);
1327 desc->len =
1328 htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
1329 desc->flags = htole32(cflags);
1330 sc->age_cdata.age_tx_cnt++;
1331 AGE_DESC_INC(prod, AGE_TX_RING_CNT);
1332 }
1333
1334 /* Update producer index. */
1335 sc->age_cdata.age_tx_prod = prod;
1336
1337 /* Set EOP on the last descriptor. */
1338 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
1339 desc = &sc->age_rdata.age_tx_ring[prod];
1340 desc->flags |= htole32(AGE_TD_EOP);
1341
	/*
	 * Swap the dmamaps of the first and the last descriptor.
	 * The mbuf is recorded on the last descriptor, so keep the
	 * loaded map with it; age_txintr() unloads that map once the
	 * frame has been transmitted.
	 */
1343 txd = &sc->age_cdata.age_txdesc[prod];
1344 map = txd_last->tx_dmamap;
1345 txd_last->tx_dmamap = txd->tx_dmamap;
1346 txd->tx_dmamap = map;
1347 txd->tx_m = m;
1348
1349 /* Sync descriptors. */
1350 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1351 BUS_DMASYNC_PREWRITE);
1352 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1353 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1354
1355 return (0);
1356 }
1357
1358 static void
1359 age_txintr(struct age_softc *sc, int tpd_cons)
1360 {
1361 struct ifnet *ifp = &sc->sc_ec.ec_if;
1362 struct age_txdesc *txd;
1363 int cons, prog;
1364
1365 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1366 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1367
1368 /*
1369 * Go through our Tx list and free mbufs for those
1370 * frames which have been transmitted.
1371 */
1372 cons = sc->age_cdata.age_tx_cons;
1373 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
1374 if (sc->age_cdata.age_tx_cnt <= 0)
1375 break;
1376 prog++;
1377 ifp->if_flags &= ~IFF_OACTIVE;
1378 sc->age_cdata.age_tx_cnt--;
1379 txd = &sc->age_cdata.age_txdesc[cons];
1380 /*
1381 * Clear Tx descriptors, it's not required but would
1382 * help debugging in case of Tx issues.
1383 */
1384 txd->tx_desc->addr = 0;
1385 txd->tx_desc->len = 0;
1386 txd->tx_desc->flags = 0;
1387
1388 if (txd->tx_m == NULL)
1389 continue;
1390 /* Reclaim transmitted mbufs. */
1391 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1392 m_freem(txd->tx_m);
1393 txd->tx_m = NULL;
1394 }
1395
1396 if (prog > 0) {
1397 sc->age_cdata.age_tx_cons = cons;
1398
1399 /*
1400 * Unarm watchdog timer only when there are no pending
1401 * Tx descriptors in queue.
1402 */
1403 if (sc->age_cdata.age_tx_cnt == 0)
1404 ifp->if_timer = 0;
1405
1406 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1407 sc->age_cdata.age_tx_ring_map->dm_mapsize,
1408 BUS_DMASYNC_PREWRITE);
1409 }
1410 }
1411
1412 /* Receive a frame. */
1413 static void
1414 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
1415 {
1416 struct ifnet *ifp = &sc->sc_ec.ec_if;
1417 struct age_rxdesc *rxd;
1418 struct rx_desc *desc;
1419 struct mbuf *mp, *m;
1420 uint32_t status, index, vtag;
1421 int count, nsegs, pktlen;
1422 int rx_cons;
1423
1424 status = le32toh(rxrd->flags);
1425 index = le32toh(rxrd->index);
1426 rx_cons = AGE_RX_CONS(index);
1427 nsegs = AGE_RX_NSEGS(index);
1428
1429 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
1430 if ((status & AGE_RRD_ERROR) != 0 &&
1431 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
1432 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
1433 /*
1434 * We want to pass the following frames to upper
1435 * layer regardless of error status of Rx return
1436 * ring.
1437 *
1438 * o IP/TCP/UDP checksum is bad.
1439 * o frame length and protocol specific length
1440 * does not match.
1441 */
1442 sc->age_cdata.age_rx_cons += nsegs;
1443 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1444 return;
1445 }
1446
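	/*
	 * A frame may span several Rx buffers; walk the buffers that
	 * belong to this return descriptor, replace each one on the
	 * ring, and chain the filled mbufs into a single packet.
	 */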
1447 pktlen = 0;
1448 for (count = 0; count < nsegs; count++,
1449 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
1450 rxd = &sc->age_cdata.age_rxdesc[rx_cons];
1451 mp = rxd->rx_m;
1452 desc = rxd->rx_desc;
1453 /* Add a new receive buffer to the ring. */
1454 if (age_newbuf(sc, rxd, 0) != 0) {
1455 ifp->if_iqdrops++;
1456 /* Reuse Rx buffers. */
1457 if (sc->age_cdata.age_rxhead != NULL) {
1458 m_freem(sc->age_cdata.age_rxhead);
1459 AGE_RXCHAIN_RESET(sc);
1460 }
1461 break;
1462 }
1463
1464 /* The length of the first mbuf is computed last. */
1465 if (count != 0) {
1466 mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
1467 pktlen += mp->m_len;
1468 }
1469
1470 /* Chain received mbufs. */
1471 if (sc->age_cdata.age_rxhead == NULL) {
1472 sc->age_cdata.age_rxhead = mp;
1473 sc->age_cdata.age_rxtail = mp;
1474 } else {
1475 mp->m_flags &= ~M_PKTHDR;
1476 sc->age_cdata.age_rxprev_tail =
1477 sc->age_cdata.age_rxtail;
1478 sc->age_cdata.age_rxtail->m_next = mp;
1479 sc->age_cdata.age_rxtail = mp;
1480 }
1481
1482 if (count == nsegs - 1) {
1483 /*
1484 * It seems that L1 controller has no way
1485 * to tell hardware to strip CRC bytes.
1486 */
1487 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
1488 if (nsegs > 1) {
1489 /* Remove the CRC bytes in chained mbufs. */
1490 pktlen -= ETHER_CRC_LEN;
1491 if (mp->m_len <= ETHER_CRC_LEN) {
1492 sc->age_cdata.age_rxtail =
1493 sc->age_cdata.age_rxprev_tail;
1494 sc->age_cdata.age_rxtail->m_len -=
1495 (ETHER_CRC_LEN - mp->m_len);
1496 sc->age_cdata.age_rxtail->m_next = NULL;
1497 m_freem(mp);
1498 } else {
1499 mp->m_len -= ETHER_CRC_LEN;
1500 }
1501 }
1502
1503 m = sc->age_cdata.age_rxhead;
1504 m->m_flags |= M_PKTHDR;
1505 m->m_pkthdr.rcvif = ifp;
1506 m->m_pkthdr.len = sc->age_cdata.age_rxlen;
1507 /* Set the first mbuf length. */
1508 m->m_len = sc->age_cdata.age_rxlen - pktlen;
1509
1510 /*
1511 * Set checksum information.
1512 * It seems that L1 controller can compute partial
1513 * checksum. The partial checksum value can be used
1514 * to accelerate checksum computation for fragmented
1515 * TCP/UDP packets. Upper network stack already
1516 * takes advantage of the partial checksum value in
1517 * IP reassembly stage. But I'm not sure the
1518 * correctness of the partial hardware checksum
1519 * assistance due to lack of data sheet. If it is
1520 * proven to work on L1 I'll enable it.
1521 */
			if (status & AGE_RRD_IPV4) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if ((status & AGE_RRD_IPCSUM_NOK) != 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) != 0
				    && (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCPv4 | M_CSUM_UDPv4;
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
			}
1537 #if NVLAN > 0
1538 /* Check for VLAN tagged frames. */
1539 if (status & AGE_RRD_VLAN) {
1540 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
1541 VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
1542 continue);
1543 }
1544 #endif
1545
1546 #if NBPFILTER > 0
1547 if (ifp->if_bpf)
1548 bpf_mtap(ifp->if_bpf, m);
1549 #endif
1550 /* Pass it on. */
1551 ether_input(ifp, m);
1552
1553 /* Reset mbuf chains. */
1554 AGE_RXCHAIN_RESET(sc);
1555 }
1556 }
1557
1558 if (count != nsegs) {
1559 sc->age_cdata.age_rx_cons += nsegs;
1560 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1561 } else
1562 sc->age_cdata.age_rx_cons = rx_cons;
1563 }
1564
1565 static void
1566 age_rxintr(struct age_softc *sc, int rr_prod)
1567 {
1568 struct rx_rdesc *rxrd;
1569 int rr_cons, nsegs, pktlen, prog;
1570
1571 rr_cons = sc->age_cdata.age_rr_cons;
1572 if (rr_cons == rr_prod)
1573 return;
1574
1575 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1576 sc->age_cdata.age_rr_ring_map->dm_mapsize,
1577 BUS_DMASYNC_POSTREAD);
1578
1579 for (prog = 0; rr_cons != rr_prod; prog++) {
1580 rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
1581 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
1582 if (nsegs == 0)
1583 break;
1584 /*
		 * Check the number of segments against the received bytes.
		 * A non-matching value would indicate that the hardware is
		 * still in the middle of updating this Rx return descriptor.
		 * I'm not sure whether this check is really needed.
1589 */
1590 pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
1591 if (nsegs != ((pktlen + (MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN)) /
1592 (MCLBYTES - ETHER_HDR_LEN)))
1593 break;
1594
1595 /* Received a frame. */
1596 age_rxeof(sc, rxrd);
1597
1598 /* Clear return ring. */
1599 rxrd->index = 0;
1600 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
1601 }
1602
1603 if (prog > 0) {
1604 /* Update the consumer index. */
1605 sc->age_cdata.age_rr_cons = rr_cons;
1606
1607 /* Sync descriptors. */
1608 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1609 sc->age_cdata.age_rr_ring_map->dm_mapsize,
1610 BUS_DMASYNC_PREWRITE);
1611
1612 /* Notify hardware availability of new Rx buffers. */
1613 AGE_COMMIT_MBOX(sc);
1614 }
1615 }
1616
1617 static void
1618 age_tick(void *xsc)
1619 {
1620 struct age_softc *sc = xsc;
1621 struct mii_data *mii = &sc->sc_miibus;
1622 int s;
1623
1624 s = splnet();
1625 mii_tick(mii);
1626 splx(s);
1627
1628 callout_schedule(&sc->sc_tick_ch, hz);
1629 }
1630
1631 static void
1632 age_reset(struct age_softc *sc)
1633 {
1634 uint32_t reg;
1635 int i;
1636
1637 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
1638 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1639 DELAY(1);
1640 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
1641 break;
1642 }
1643 if (i == 0)
1644 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
1645
1646 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1647 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1648 break;
1649 DELAY(10);
1650 }
1651
1652 if (i == 0)
1653 printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
1654 reg);
1655
1656 /* Initialize PCIe module. From Linux. */
1657 CSR_WRITE_4(sc, 0x12FC, 0x6500);
1658 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1659 }
1660
1661 static int
1662 age_init(struct ifnet *ifp)
1663 {
1664 struct age_softc *sc = ifp->if_softc;
1665 struct mii_data *mii;
1666 uint8_t eaddr[ETHER_ADDR_LEN];
1667 bus_addr_t paddr;
1668 uint32_t reg, fsize;
1669 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
1670 int error;
1671
1672 /*
1673 * Cancel any pending I/O.
1674 */
1675 age_stop(sc);
1676
1677 /*
1678 * Reset the chip to a known state.
1679 */
1680 age_reset(sc);
1681
1682 /* Initialize descriptors. */
1683 error = age_init_rx_ring(sc);
1684 if (error != 0) {
1685 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
1686 age_stop(sc);
1687 return (error);
1688 }
1689 age_init_rr_ring(sc);
1690 age_init_tx_ring(sc);
1691 age_init_cmb_block(sc);
1692 age_init_smb_block(sc);
1693
1694 /* Reprogram the station address. */
1695 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
1696 CSR_WRITE_4(sc, AGE_PAR0,
1697 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
1698 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
1699
1700 /* Set descriptor base addresses. */
1701 paddr = sc->age_rdata.age_tx_ring_paddr;
1702 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
1703 paddr = sc->age_rdata.age_rx_ring_paddr;
1704 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
1705 paddr = sc->age_rdata.age_rr_ring_paddr;
1706 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
1707 paddr = sc->age_rdata.age_tx_ring_paddr;
1708 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
1709 paddr = sc->age_rdata.age_cmb_block_paddr;
1710 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
1711 paddr = sc->age_rdata.age_smb_block_paddr;
1712 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
1713
1714 /* Set Rx/Rx return descriptor counter. */
1715 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
1716 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
1717 DESC_RRD_CNT_MASK) |
1718 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
1719
1720 /* Set Tx descriptor counter. */
1721 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
1722 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
1723
1724 /* Tell hardware that we're ready to load descriptors. */
1725 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
1726
1727 /*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However the Tx producer and
	 * the Rx return consumer/Rx producer all share it, so it is
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler in turn could run without
	 * any locking.
1737 */
1738 AGE_COMMIT_MBOX(sc);
1739
1740 /* Configure IPG/IFG parameters. */
1741 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
1742 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
1743 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1744 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1745 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
1746
1747 /* Set parameters for half-duplex media. */
1748 CSR_WRITE_4(sc, AGE_HDPX_CFG,
1749 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1750 HDPX_CFG_LCOL_MASK) |
1751 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1752 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1753 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1754 HDPX_CFG_ABEBT_MASK) |
1755 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1756 HDPX_CFG_JAMIPG_MASK));
1757
1758 /* Configure interrupt moderation timer. */
1759 sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
1760 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
1761 reg = CSR_READ_4(sc, AGE_MASTER_CFG);
1762 reg &= ~MASTER_MTIMER_ENB;
1763 if (AGE_USECS(sc->age_int_mod) == 0)
1764 reg &= ~MASTER_ITIMER_ENB;
1765 else
1766 reg |= MASTER_ITIMER_ENB;
1767 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
1768 if (agedebug)
1769 printf("%s: interrupt moderation is %d us.\n",
1770 device_xname(sc->sc_dev), sc->age_int_mod);
1771 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
1772
	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
1774 if (ifp->if_mtu < ETHERMTU)
1775 sc->age_max_frame_size = ETHERMTU;
1776 else
1777 sc->age_max_frame_size = ifp->if_mtu;
1778 sc->age_max_frame_size += ETHER_HDR_LEN +
1779 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1780 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1781
1782 /* Configure jumbo frame. */
1783 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1784 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1785 (((fsize / sizeof(uint64_t)) <<
1786 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1787 ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1788 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1789 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1790 RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1791
1792 /* Configure flow-control parameters. From Linux. */
1793 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1794 /*
1795 * Magic workaround for old-L1.
1796 * Don't know which hw revision requires this magic.
1797 */
1798 CSR_WRITE_4(sc, 0x12FC, 0x6500);
1799 /*
1800 * Another magic workaround for flow-control mode
1801 * change. From Linux.
1802 */
1803 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1804 }
1805 /*
1806 * TODO
1807 * Should understand pause parameter relationships between FIFO
1808 * size and number of Rx descriptors and Rx return descriptors.
1809 *
1810 * Magic parameters came from Linux.
1811 */
1812 switch (sc->age_chip_rev) {
1813 case 0x8001:
1814 case 0x9001:
1815 case 0x9002:
1816 case 0x9003:
1817 rxf_hi = AGE_RX_RING_CNT / 16;
1818 rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1819 rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1820 rrd_lo = AGE_RR_RING_CNT / 16;
1821 break;
1822 default:
1823 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1824 rxf_lo = reg / 16;
1825 if (rxf_lo < 192)
1826 rxf_lo = 192;
1827 rxf_hi = (reg * 7) / 8;
1828 if (rxf_hi < rxf_lo)
1829 rxf_hi = rxf_lo + 16;
1830 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1831 rrd_lo = reg / 8;
1832 rrd_hi = (reg * 7) / 8;
1833 if (rrd_lo < 2)
1834 rrd_lo = 2;
1835 if (rrd_hi < rrd_lo)
1836 rrd_hi = rrd_lo + 3;
1837 break;
1838 }
1839 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1840 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1841 RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1842 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1843 RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1844 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1845 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1846 RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1847 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1848 RXQ_RRD_PAUSE_THRESH_HI_MASK));
1849
1850 /* Configure RxQ. */
1851 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1852 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1853 RXQ_CFG_RD_BURST_MASK) |
1854 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1855 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1856 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1857 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1858 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1859
1860 /* Configure TxQ. */
1861 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1862 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1863 TXQ_CFG_TPD_BURST_MASK) |
1864 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1865 TXQ_CFG_TX_FIFO_BURST_MASK) |
1866 ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1867 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1868 TXQ_CFG_ENB);
1869
1870 /* Configure DMA parameters. */
1871 CSR_WRITE_4(sc, AGE_DMA_CFG,
1872 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1873 sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1874 sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1875
1876 /* Configure CMB DMA write threshold. */
1877 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1878 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1879 CMB_WR_THRESH_RRD_MASK) |
1880 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1881 CMB_WR_THRESH_TPD_MASK));
1882
1883 /* Set CMB/SMB timer and enable them. */
1884 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1885 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1886 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1887
	/* Request SMB updates every second. */
1889 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1890 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1891
1892 /*
1893 	 * Disable all WOL bits, as WOL can interfere with normal Rx
1894 	 * operation.
1895 */
1896 CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1897
1898 /*
1899 * Configure Tx/Rx MACs.
1900 * - Auto-padding for short frames.
1901 * - Enable CRC generation.
1902 	 * Start with full-duplex/1000Mbps media.  The MAC is reconfigured
1903 	 * once the link has been established.
1904 */
1905 CSR_WRITE_4(sc, AGE_MAC_CFG,
1906 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1907 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1908 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1909 MAC_CFG_PREAMBLE_MASK));
1910
1911 /* Set up the receive filter. */
1912 age_rxfilter(sc);
1913 age_rxvlan(sc);
1914
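	/*
	 * Re-read MAC_CFG (age_rxfilter()/age_rxvlan() may have modified it)
	 * and turn on hardware Rx checksum offload before enabling the MAC.
	 */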
1915 reg = CSR_READ_4(sc, AGE_MAC_CFG);
1916 reg |= MAC_CFG_RXCSUM_ENB;
1917
1918 	/* Ack and clear all pending interrupts. */
1919 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1920 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1921
1922 /* Finally enable Tx/Rx MAC. */
1923 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1924
1925 sc->age_flags &= ~AGE_FLAG_LINK;
1926
1927 /* Switch to the current media. */
1928 mii = &sc->sc_miibus;
1929 mii_mediachg(mii);
1930
1931 callout_schedule(&sc->sc_tick_ch, hz);
1932
1933 ifp->if_flags |= IFF_RUNNING;
1934 ifp->if_flags &= ~IFF_OACTIVE;
1935
1936 return (0);
1937 }
1938
1939 static void
1940 age_stop(struct age_softc *sc)
1941 {
1942 struct ifnet *ifp = &sc->sc_ec.ec_if;
1943 struct age_txdesc *txd;
1944 struct age_rxdesc *rxd;
1945 uint32_t reg;
1946 int i;
1947
1948 callout_stop(&sc->sc_tick_ch);
1949
1950 /*
1951 * Mark the interface down and cancel the watchdog timer.
1952 */
1953 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1954 ifp->if_timer = 0;
1955
1956 sc->age_flags &= ~AGE_FLAG_LINK;
1957
1958 /*
1959 * Disable interrupts.
1960 */
1961 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1962 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1963
1964 /* Stop CMB/SMB updates. */
1965 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1966
1967 /* Stop Rx/Tx MAC. */
1968 age_stop_rxmac(sc);
1969 age_stop_txmac(sc);
1970
1971 /* Stop DMA. */
1972 CSR_WRITE_4(sc, AGE_DMA_CFG,
1973 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1974
1975 /* Stop TxQ/RxQ. */
1976 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1977 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1978 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1979 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
1980 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1981 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1982 break;
1983 DELAY(10);
1984 }
1985 if (i == 0)
1986 		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1987 device_xname(sc->sc_dev), reg);
1988
1989 /* Reclaim Rx buffers that have been processed. */
1990 if (sc->age_cdata.age_rxhead != NULL)
1991 m_freem(sc->age_cdata.age_rxhead);
1992 AGE_RXCHAIN_RESET(sc);
1993
1994 /*
1995 * Free RX and TX mbufs still in the queues.
1996 */
1997 for (i = 0; i < AGE_RX_RING_CNT; i++) {
1998 rxd = &sc->age_cdata.age_rxdesc[i];
1999 if (rxd->rx_m != NULL) {
2000 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2001 m_freem(rxd->rx_m);
2002 rxd->rx_m = NULL;
2003 }
2004 }
2005 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2006 txd = &sc->age_cdata.age_txdesc[i];
2007 if (txd->tx_m != NULL) {
2008 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2009 m_freem(txd->tx_m);
2010 txd->tx_m = NULL;
2011 }
2012 }
2013 }
2014
2015 static void
2016 age_stats_update(struct age_softc *sc)
2017 {
2018 struct ifnet *ifp = &sc->sc_ec.ec_if;
2019 struct age_stats *stat;
2020 struct smb *smb;
2021
2022 stat = &sc->age_stat;
2023
2024 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2025 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2026
2027 smb = sc->age_rdata.age_smb_block;
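	/*
	 * The MAC periodically DMAs a snapshot of its counters into the
	 * statistics message block and sets the "updated" flag; if no new
	 * snapshot has arrived since the last harvest there is nothing to
	 * accumulate.
	 */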
2028 if (smb->updated == 0)
2029 return;
2030
2031 /* Rx stats. */
2032 stat->rx_frames += smb->rx_frames;
2033 stat->rx_bcast_frames += smb->rx_bcast_frames;
2034 stat->rx_mcast_frames += smb->rx_mcast_frames;
2035 stat->rx_pause_frames += smb->rx_pause_frames;
2036 stat->rx_control_frames += smb->rx_control_frames;
2037 stat->rx_crcerrs += smb->rx_crcerrs;
2038 stat->rx_lenerrs += smb->rx_lenerrs;
2039 stat->rx_bytes += smb->rx_bytes;
2040 stat->rx_runts += smb->rx_runts;
2041 stat->rx_fragments += smb->rx_fragments;
2042 stat->rx_pkts_64 += smb->rx_pkts_64;
2043 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2044 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2045 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2046 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2047 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2048 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2049 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2050 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2051 stat->rx_desc_oflows += smb->rx_desc_oflows;
2052 stat->rx_alignerrs += smb->rx_alignerrs;
2053 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2054 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2055 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2056
2057 /* Tx stats. */
2058 stat->tx_frames += smb->tx_frames;
2059 stat->tx_bcast_frames += smb->tx_bcast_frames;
2060 stat->tx_mcast_frames += smb->tx_mcast_frames;
2061 stat->tx_pause_frames += smb->tx_pause_frames;
2062 stat->tx_excess_defer += smb->tx_excess_defer;
2063 stat->tx_control_frames += smb->tx_control_frames;
2064 stat->tx_deferred += smb->tx_deferred;
2065 stat->tx_bytes += smb->tx_bytes;
2066 stat->tx_pkts_64 += smb->tx_pkts_64;
2067 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2068 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2069 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2070 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2071 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2072 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2073 stat->tx_single_colls += smb->tx_single_colls;
2074 stat->tx_multi_colls += smb->tx_multi_colls;
2075 stat->tx_late_colls += smb->tx_late_colls;
2076 stat->tx_excess_colls += smb->tx_excess_colls;
2077 stat->tx_underrun += smb->tx_underrun;
2078 stat->tx_desc_underrun += smb->tx_desc_underrun;
2079 stat->tx_lenerrs += smb->tx_lenerrs;
2080 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2081 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2082 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2083
2084 /* Update counters in ifnet. */
2085 ifp->if_opackets += smb->tx_frames;
2086
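	/*
	 * A frame dropped for excessive collisions presumably went through
	 * the full retry count, hence the HDPX_CFG_RETRY_DEFAULT multiplier
	 * below.
	 */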
2087 ifp->if_collisions += smb->tx_single_colls +
2088 smb->tx_multi_colls + smb->tx_late_colls +
2089 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2090
2091 ifp->if_oerrors += smb->tx_excess_colls +
2092 smb->tx_late_colls + smb->tx_underrun +
2093 smb->tx_pkts_truncated;
2094
2095 ifp->if_ipackets += smb->rx_frames;
2096
2097 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2098 smb->rx_runts + smb->rx_pkts_truncated +
2099 smb->rx_fifo_oflows + smb->rx_desc_oflows +
2100 smb->rx_alignerrs;
2101
2102 /* Update done, clear. */
2103 smb->updated = 0;
2104
2105 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2106 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2107 }
2108
2109 static void
2110 age_stop_txmac(struct age_softc *sc)
2111 {
2112 uint32_t reg;
2113 int i;
2114
2115 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2116 if ((reg & MAC_CFG_TX_ENB) != 0) {
2117 reg &= ~MAC_CFG_TX_ENB;
2118 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2119 }
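	/*
	 * DMA_CFG_RD_ENB gates the host-to-device (descriptor/Tx buffer
	 * read) engine, which is why stopping the Tx path clears the
	 * "read" enable rather than the "write" enable.
	 */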
2120 /* Stop Tx DMA engine. */
2121 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2122 if ((reg & DMA_CFG_RD_ENB) != 0) {
2123 reg &= ~DMA_CFG_RD_ENB;
2124 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2125 }
2126 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2127 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2128 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2129 break;
2130 DELAY(10);
2131 }
2132 if (i == 0)
2133 		printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2134 }
2135
2136 static void
2137 age_stop_rxmac(struct age_softc *sc)
2138 {
2139 uint32_t reg;
2140 int i;
2141
2142 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2143 if ((reg & MAC_CFG_RX_ENB) != 0) {
2144 reg &= ~MAC_CFG_RX_ENB;
2145 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2146 }
2147 /* Stop Rx DMA engine. */
2148 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2149 if ((reg & DMA_CFG_WR_ENB) != 0) {
2150 reg &= ~DMA_CFG_WR_ENB;
2151 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2152 }
2153 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2154 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2155 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2156 break;
2157 DELAY(10);
2158 }
2159 if (i == 0)
2160 		printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2161 }
2162
2163 static void
2164 age_init_tx_ring(struct age_softc *sc)
2165 {
2166 struct age_ring_data *rd;
2167 struct age_txdesc *txd;
2168 int i;
2169
2170 sc->age_cdata.age_tx_prod = 0;
2171 sc->age_cdata.age_tx_cons = 0;
2172 sc->age_cdata.age_tx_cnt = 0;
2173
2174 rd = &sc->age_rdata;
2175 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2176 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2177 txd = &sc->age_cdata.age_txdesc[i];
2178 txd->tx_desc = &rd->age_tx_ring[i];
2179 txd->tx_m = NULL;
2180 }
2181 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2182 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2183 }
2184
2185 static int
2186 age_init_rx_ring(struct age_softc *sc)
2187 {
2188 struct age_ring_data *rd;
2189 struct age_rxdesc *rxd;
2190 int i;
2191
2192 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2193 rd = &sc->age_rdata;
2194 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2195 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2196 rxd = &sc->age_cdata.age_rxdesc[i];
2197 rxd->rx_m = NULL;
2198 rxd->rx_desc = &rd->age_rx_ring[i];
2199 if (age_newbuf(sc, rxd, 1) != 0)
2200 return (ENOBUFS);
2201 }
2202
2203 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2204 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2205
2206 return (0);
2207 }
2208
2209 static void
2210 age_init_rr_ring(struct age_softc *sc)
2211 {
2212 struct age_ring_data *rd;
2213
2214 sc->age_cdata.age_rr_cons = 0;
2215 AGE_RXCHAIN_RESET(sc);
2216
2217 rd = &sc->age_rdata;
2218 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2219 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2220 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2221 }
2222
2223 static void
2224 age_init_cmb_block(struct age_softc *sc)
2225 {
2226 struct age_ring_data *rd;
2227
2228 rd = &sc->age_rdata;
2229 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2230 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2231 sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2232 }
2233
2234 static void
2235 age_init_smb_block(struct age_softc *sc)
2236 {
2237 struct age_ring_data *rd;
2238
2239 rd = &sc->age_rdata;
2240 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2241 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2242 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2243 }
2244
2245 static int
2246 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2247 {
2248 struct rx_desc *desc;
2249 struct mbuf *m;
2250 bus_dmamap_t map;
2251 int error;
2252
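	/*
	 * During ring initialization it is safe to sleep for memory
	 * (M_WAITOK); in the Rx refill path allocation must not block,
	 * so M_DONTWAIT is used instead.
	 */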
2253 MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2254 if (m == NULL)
2255 return (ENOBUFS);
2256 MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2257 if (!(m->m_flags & M_EXT)) {
2258 m_freem(m);
2259 return (ENOBUFS);
2260 }
2261
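	/*
	 * Trim ETHER_ALIGN (2) bytes from the front of the cluster so that
	 * the IP header following the 14-byte Ethernet header ends up
	 * 32-bit aligned.
	 */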
2262 m->m_len = m->m_pkthdr.len = MCLBYTES;
2263 	m_adj(m, ETHER_ALIGN);
2264
2265 error = bus_dmamap_load_mbuf(sc->sc_dmat,
2266 sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2267
2268 	if (error != 0) {
2276 		m_freem(m);
2277 
2278 		if (init)
2279 			printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2280 		return (error);
2281 	}
2282
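	/*
	 * The new mbuf was loaded into the spare DMA map, so on failure the
	 * descriptor above kept its old buffer intact.  Retire the old
	 * mapping (if any) and swap the spare map into the descriptor.
	 */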
2283 if (rxd->rx_m != NULL) {
2284 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2285 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2286 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2287 }
2288 map = rxd->rx_dmamap;
2289 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2290 sc->age_cdata.age_rx_sparemap = map;
2291 rxd->rx_m = m;
2292
2293 desc = rxd->rx_desc;
2294 desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2295 desc->len =
2296 htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2297 AGE_RD_LEN_SHIFT);
2298
2299 return (0);
2300 }
2301
2302 static void
2303 age_rxvlan(struct age_softc *sc)
2304 {
2305 uint32_t reg;
2306
2307 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2308 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2309 if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
2310 reg |= MAC_CFG_VLAN_TAG_STRIP;
2311 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2312 }
2313
2314 static void
2315 age_rxfilter(struct age_softc *sc)
2316 {
2317 struct ethercom *ec = &sc->sc_ec;
2318 struct ifnet *ifp = &sc->sc_ec.ec_if;
2319 struct ether_multi *enm;
2320 struct ether_multistep step;
2321 uint32_t crc;
2322 uint32_t mchash[2];
2323 uint32_t rxcfg;
2324
2325 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2326 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2327
2328 if (ifp->if_flags & IFF_BROADCAST)
2329 rxcfg |= MAC_CFG_BCAST;
2330 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2331 if (ifp->if_flags & IFF_PROMISC)
2332 rxcfg |= MAC_CFG_PROMISC;
2333 if (ifp->if_flags & IFF_ALLMULTI)
2334 rxcfg |= MAC_CFG_ALLMULTI;
2335 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
2336 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
2337 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2338 return;
2339 }
2340
2341 /* Program new filter. */
2342 memset(mchash, 0, sizeof(mchash));
2343
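	/*
	 * 64-bit multicast hash filter: the top bit of the CRC selects the
	 * MAR0/MAR1 register and the next five bits select the bit within
	 * it.
	 */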
2344 ETHER_FIRST_MULTI(step, ec, enm);
2345 while (enm != NULL) {
2346 		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2348
2349 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2350 ETHER_NEXT_MULTI(step, enm);
2351 }
2352
2353 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2354 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2355 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2356 }
2357