/*	$NetBSD: if_age.c,v 1.60 2019/07/09 08:46:58 msaitoh Exp $	*/
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.60 2019/07/09 08:46:58 msaitoh Exp $");

#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t, const pmf_qual_t *);

static int	age_miibus_readreg(device_t, int, int, uint16_t *);
static int	age_miibus_writereg(device_t, int, int, uint16_t);
static void	age_miibus_statchg(struct ifnet *);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static bool	age_shutdown(device_t, int);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define	DPRINTF(x)	do { if (agedebug) printf x; } while (0)
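/*
 * Usage sketch: DPRINTF takes a complete printf-style argument list as
 * its single macro argument, so calls need double parentheses, e.g.
 *
 *	DPRINTF(("%s: Tx underrun\n", device_xname(sc->sc_dev)));
 */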

#define	AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_miibus;
	pcireg_t memtype;
	int error = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_irq_handle = pci_intr_establish_xname(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc, device_xname(self));
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA structures. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Rx;
#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx |
	    IFCAP_CSUM_TCPv4_Tx |
	    IFCAP_CSUM_UDPv4_Tx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	sc->sc_ec.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = age_miibus_readreg;
	mii->mii_writereg = age_miibus_writereg;
	mii->mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, age_mediachange, age_mediastatus);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register1(self, NULL, age_resume, age_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return -1;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
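	/* Poll until the MDIO unit clears the EXECUTE/BUSY bits. */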
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	*val = (v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT;
	return 0;
}

/*
 * Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return -1;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return ETIMEDOUT;
	}

	return 0;
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * This happens when bringing up the interface
		 * without a carrier. Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	status = le32toh(cmb->intr_status);
	/* ACK/reenable interrupts */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
	while ((status & AGE_INTRS) != 0) {
		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
		    TPD_CONS_SHIFT;
		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
		    RRD_PROD_SHIFT;

		/* Let hardware know CMB was served. */
		cmb->intr_status = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		if (ifp->if_flags & IFF_RUNNING) {
			if (status & INTR_CMB_RX)
				age_rxintr(sc, sc->age_rr_prod);

			if (status & INTR_CMB_TX)
				age_txintr(sc, sc->age_tpd_cons);

			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
				if (status & INTR_DMA_RD_TO_RST)
					printf("%s: DMA read error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				if (status & INTR_DMA_WR_TO_RST)
					printf("%s: DMA write error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				age_init(ifp);
			}

			if_schedule_deferred_start(ifp);

			if (status & INTR_SMB)
				age_stats_update(sc);
		}
		/* Check if more interrupts came in. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		status = le32toh(cmb->intr_status);
	}

	return 1;
}

static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found; let the TWSI reload the
		 * EEPROM. This sets the Ethernet address of the
		 * controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

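	/*
	 * The station address is laid out with PAR1 holding the two
	 * most significant bytes and PAR0 the remaining four.
	 */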
	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define	ATPHY_DBG_ADDR		0x1D
#define	ATPHY_DBG_DATA		0x1E
#define	ATPHY_CDTC		0x16
#define	PHY_CDTC_ENB		0x0001
#define	PHY_CDTC_POFF		8
#define	ATPHY_CDTS		0x1C
#define	PHY_CDTS_STAT_OK	0x0000
#define	PHY_CDTS_STAT_SHORT	0x0100
#define	PHY_CDTS_STAT_OPEN	0x0200
#define	PHY_CDTS_STAT_INVAL	0x0300
#define	PHY_CDTS_STAT_MASK	0x0300

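	/*
	 * The loop below appears to run the PHY's cable diagnostic on
	 * each of the four pairs; any result other than "open" is taken
	 * to mean a link partner is present.
	 */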
	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC, &reg);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS, &reg);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, &reg);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef	ATPHY_DBG_ADDR
#undef	ATPHY_DBG_DATA
#undef	ATPHY_CDTC
#undef	PHY_CDTC_ENB
#undef	PHY_CDTC_POFF
#undef	ATPHY_CDTS
#undef	PHY_CDTS_STAT_OK
#undef	PHY_CDTS_STAT_SHORT
#undef	PHY_CDTS_STAT_OPEN
#undef	PHY_CDTS_STAT_INVAL
#undef	PHY_CDTS_STAT_MASK
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

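	/*
	 * Each ring/block below follows the same four-step bus_dma(9)
	 * pattern: bus_dmamap_create() a map, bus_dmamem_alloc() the
	 * memory, bus_dmamem_map() it into KVA, and bus_dmamap_load()
	 * it to obtain the physical address programmed into the chip.
	 */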
	/*
	 * Create DMA resources for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head, BPF_D_OUT);
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

static bool
age_shutdown(device_t self, int howto)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ec.ec_if;
	age_stop(ifp, 1);

	return true;
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv, const pmf_qual_t *qual)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets during a resume event. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if (vlan_has_tag(m)) {
		vtag = AGE_TX_VLAN_TAG(htons(vlan_get_tag(m)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

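	/*
	 * Build one Tx descriptor per DMA segment; the checksum and
	 * VLAN flags are replicated in every descriptor of the frame,
	 * and AGE_TD_EOP is set on the last one below.
	 */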
	desc = NULL;
	KASSERT(nsegs > 0);
	for (i = 0; ; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		if (i == (nsegs - 1))
			break;

		/* Sync this descriptor and go to the next one */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_PREWRITE);
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Set EOP on the last descriptor and sync it. */
	desc->flags |= htole32(AGE_TD_EOP);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
	    BUS_DMASYNC_PREWRITE);

	if (nsegs > 1) {
		/* Swap dmamap of the first and the last. */
		txd = &sc->age_cdata.age_txdesc[prod];
		map = txd_last->tx_dmamap;
		txd_last->tx_dmamap = txd->tx_dmamap;
		txd->tx_dmamap = map;
		txd->tx_m = m;
		KASSERT(txd_last->tx_m == NULL);
	} else {
		KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]);
		txd_last->tx_m = m;
	}

	/* Update producer index. */
	AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	sc->age_cdata.age_tx_prod = prod;

	return 0;
}

static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;

	if (sc->age_cdata.age_tx_cnt <= 0) {
		if (ifp->if_timer != 0)
			printf("timer running without packets\n");
		if (sc->age_cdata.age_tx_cnt)
			printf("age_tx_cnt corrupted\n");
	}

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear the Tx descriptors; this is not strictly
		 * required, but it helps debugging in case of Tx
		 * issues.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    cons * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_POSTWRITE);
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

/* Receive a frame. */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = le32toh(rxrd->flags);
	index = le32toh(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring.
		 *
		 *   o IP/TCP/UDP checksum is bad.
		 *   o frame length and protocol-specific length do
		 *     not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			m_remove_pkthdr(mp);
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			KASSERT(m->m_flags & M_PKTHDR);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that the L1 controller can compute
			 * a partial checksum. The partial checksum
			 * value could be used to accelerate checksum
			 * computation for fragmented TCP/UDP packets.
			 * The upper network stack already takes
			 * advantage of the partial checksum value in
			 * the IP reassembly stage. But I'm not sure
			 * about the correctness of the partial
			 * hardware checksum assistance due to the
			 * lack of a data sheet. If it is proven to
			 * work on the L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
				/*
				 * Don't mark bad checksum for TCP/UDP
				 * frames, as fragmented frames may
				 * always have the bad-checksum bit of
				 * the descriptor status set.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
				vlan_set_tag(m, AGE_RX_VLAN_TAG(vtag));
			}
#endif

			/* Pass it on. */
			if_percpuq_enqueue(ifp->if_percpuq, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

static void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received
		 * bytes. A non-matching value would indicate that the
		 * hardware is still trying to update the Rx return
		 * descriptors. I'm not sure whether this check is
		 * really needed.
		 */
		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

static void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

static int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
		age_stop(ifp, 0);
		return error;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
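	/*
	 * Only AGE_DESC_ADDR_HI is programmed with the upper 32 address
	 * bits; the other rings get low addresses only, so all of the
	 * descriptor memory is evidently assumed to share the same
	 * upper 32-bit region.
	 */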

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register. However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it's
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler in turn could run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/*
	 * Set the maximum frame size, but don't let the MTU be less
	 * than ETHERMTU.
	 */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
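	/* The jumbo size threshold below is programmed in 8-byte units. */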
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand pause parameter relationships between FIFO
	 * size and number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media. The MAC is actually
	 * reconfigured after link establishment.
	 */
	CSR_WRITE_4(sc, AGE_MAC_CFG,
	    MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
	    MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK));

	/* Set up the receive filter. */
	age_rxfilter(sc);
	age_rxvlan(sc);

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg |= MAC_CFG_RXCSUM_ENB;

1866 	/* Ack and clear all pending interrupts, then enable them. */
1867 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1868 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1869
1870 /* Finally enable Tx/Rx MAC. */
1871 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1872
1873 sc->age_flags &= ~AGE_FLAG_LINK;
1874
1875 /* Switch to the current media. */
1876 mii = &sc->sc_miibus;
1877 mii_mediachg(mii);
1878
1879 callout_schedule(&sc->sc_tick_ch, hz);
1880
1881 ifp->if_flags |= IFF_RUNNING;
1882 ifp->if_flags &= ~IFF_OACTIVE;
1883
1884 return 0;
1885 }
1886
1887 static void
1888 age_stop(struct ifnet *ifp, int disable)
1889 {
1890 struct age_softc *sc = ifp->if_softc;
1891 struct age_txdesc *txd;
1892 struct age_rxdesc *rxd;
1893 uint32_t reg;
1894 int i;
1895
1896 callout_stop(&sc->sc_tick_ch);
1897
1898 /*
1899 * Mark the interface down and cancel the watchdog timer.
1900 */
1901 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1902 ifp->if_timer = 0;
1903
1904 sc->age_flags &= ~AGE_FLAG_LINK;
1905
1906 mii_down(&sc->sc_miibus);
1907
1908 /*
1909 * Disable interrupts.
1910 */
1911 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1912 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1913
1914 /* Stop CMB/SMB updates. */
1915 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1916
1917 /* Stop Rx/Tx MAC. */
1918 age_stop_rxmac(sc);
1919 age_stop_txmac(sc);
1920
1921 /* Stop DMA. */
1922 CSR_WRITE_4(sc, AGE_DMA_CFG,
1923 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1924
1925 /* Stop TxQ/RxQ. */
1926 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1927 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1928 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1929 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
1930 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1931 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1932 break;
1933 DELAY(10);
1934 }
1935 if (i == 0)
1936 		printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1937 device_xname(sc->sc_dev), reg);
1938
1939 /* Reclaim Rx buffers that have been processed. */
1940 if (sc->age_cdata.age_rxhead != NULL)
1941 m_freem(sc->age_cdata.age_rxhead);
1942 AGE_RXCHAIN_RESET(sc);
1943
1944 /*
1945 * Free RX and TX mbufs still in the queues.
1946 */
1947 for (i = 0; i < AGE_RX_RING_CNT; i++) {
1948 rxd = &sc->age_cdata.age_rxdesc[i];
1949 if (rxd->rx_m != NULL) {
1950 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1951 m_freem(rxd->rx_m);
1952 rxd->rx_m = NULL;
1953 }
1954 }
1955 for (i = 0; i < AGE_TX_RING_CNT; i++) {
1956 txd = &sc->age_cdata.age_txdesc[i];
1957 if (txd->tx_m != NULL) {
1958 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1959 m_freem(txd->tx_m);
1960 txd->tx_m = NULL;
1961 }
1962 }
1963 }
1964
1965 static void
1966 age_stats_update(struct age_softc *sc)
1967 {
1968 struct ifnet *ifp = &sc->sc_ec.ec_if;
1969 struct age_stats *stat;
1970 struct smb *smb;
1971
1972 stat = &sc->age_stat;
1973
1974 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1975 sc->age_cdata.age_smb_block_map->dm_mapsize,
1976 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1977
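	/*
	 * The chip periodically DMAs a fresh statistics block and then
	 * sets smb->updated; each counter holds the delta accumulated
	 * since the previous update, so fold the deltas into the running
	 * totals in the softc and clear the flag when done.
	 */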
1978 smb = sc->age_rdata.age_smb_block;
1979 if (smb->updated == 0)
1980 return;
1981
1982 /* Rx stats. */
1983 stat->rx_frames += smb->rx_frames;
1984 stat->rx_bcast_frames += smb->rx_bcast_frames;
1985 stat->rx_mcast_frames += smb->rx_mcast_frames;
1986 stat->rx_pause_frames += smb->rx_pause_frames;
1987 stat->rx_control_frames += smb->rx_control_frames;
1988 stat->rx_crcerrs += smb->rx_crcerrs;
1989 stat->rx_lenerrs += smb->rx_lenerrs;
1990 stat->rx_bytes += smb->rx_bytes;
1991 stat->rx_runts += smb->rx_runts;
1992 stat->rx_fragments += smb->rx_fragments;
1993 stat->rx_pkts_64 += smb->rx_pkts_64;
1994 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1995 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1996 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1997 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1998 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1999 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2000 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2001 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2002 stat->rx_desc_oflows += smb->rx_desc_oflows;
2003 stat->rx_alignerrs += smb->rx_alignerrs;
2004 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2005 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2006 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2007
2008 /* Tx stats. */
2009 stat->tx_frames += smb->tx_frames;
2010 stat->tx_bcast_frames += smb->tx_bcast_frames;
2011 stat->tx_mcast_frames += smb->tx_mcast_frames;
2012 stat->tx_pause_frames += smb->tx_pause_frames;
2013 stat->tx_excess_defer += smb->tx_excess_defer;
2014 stat->tx_control_frames += smb->tx_control_frames;
2015 stat->tx_deferred += smb->tx_deferred;
2016 stat->tx_bytes += smb->tx_bytes;
2017 stat->tx_pkts_64 += smb->tx_pkts_64;
2018 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2019 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2020 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2021 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2022 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2023 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2024 stat->tx_single_colls += smb->tx_single_colls;
2025 stat->tx_multi_colls += smb->tx_multi_colls;
2026 stat->tx_late_colls += smb->tx_late_colls;
2027 stat->tx_excess_colls += smb->tx_excess_colls;
2028 stat->tx_underrun += smb->tx_underrun;
2029 stat->tx_desc_underrun += smb->tx_desc_underrun;
2030 stat->tx_lenerrs += smb->tx_lenerrs;
2031 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2032 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2033 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2034
2035 /* Update counters in ifnet. */
2036 ifp->if_opackets += smb->tx_frames;
2037
2038 ifp->if_collisions += smb->tx_single_colls +
2039 smb->tx_multi_colls + smb->tx_late_colls +
2040 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2041
2042 ifp->if_oerrors += smb->tx_excess_colls +
2043 smb->tx_late_colls + smb->tx_underrun +
2044 smb->tx_pkts_truncated;
2045
2046 ifp->if_ipackets += smb->rx_frames;
2047
2048 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2049 smb->rx_runts + smb->rx_pkts_truncated +
2050 smb->rx_fifo_oflows + smb->rx_desc_oflows +
2051 smb->rx_alignerrs;
2052
2053 /* Update done, clear. */
2054 smb->updated = 0;
2055
2056 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2057 sc->age_cdata.age_smb_block_map->dm_mapsize,
2058 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2059 }
2060
2061 static void
2062 age_stop_txmac(struct age_softc *sc)
2063 {
2064 uint32_t reg;
2065 int i;
2066
2067 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2068 if ((reg & MAC_CFG_TX_ENB) != 0) {
2069 reg &= ~MAC_CFG_TX_ENB;
2070 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2071 }
2072 /* Stop Tx DMA engine. */
2073 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2074 if ((reg & DMA_CFG_RD_ENB) != 0) {
2075 reg &= ~DMA_CFG_RD_ENB;
2076 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2077 }
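	/* Poll until both the Tx MAC and the read-DMA engine are idle. */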
2078 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2079 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2080 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2081 break;
2082 DELAY(10);
2083 }
2084 if (i == 0)
2085 		printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2086 }
2087
2088 static void
2089 age_stop_rxmac(struct age_softc *sc)
2090 {
2091 uint32_t reg;
2092 int i;
2093
2094 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2095 if ((reg & MAC_CFG_RX_ENB) != 0) {
2096 reg &= ~MAC_CFG_RX_ENB;
2097 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2098 }
2099 /* Stop Rx DMA engine. */
2100 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2101 if ((reg & DMA_CFG_WR_ENB) != 0) {
2102 reg &= ~DMA_CFG_WR_ENB;
2103 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2104 }
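	/* Poll until both the Rx MAC and the write-DMA engine are idle. */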
2105 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2106 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2107 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2108 break;
2109 DELAY(10);
2110 }
2111 if (i == 0)
2112 		printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2113 }
2114
2115 static void
2116 age_init_tx_ring(struct age_softc *sc)
2117 {
2118 struct age_ring_data *rd;
2119 struct age_txdesc *txd;
2120 int i;
2121
2122 sc->age_cdata.age_tx_prod = 0;
2123 sc->age_cdata.age_tx_cons = 0;
2124 sc->age_cdata.age_tx_cnt = 0;
2125
2126 rd = &sc->age_rdata;
2127 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2128 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2129 txd = &sc->age_cdata.age_txdesc[i];
2130 txd->tx_desc = &rd->age_tx_ring[i];
2131 txd->tx_m = NULL;
2132 }
2133 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2134 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2135 }
2136
2137 static int
2138 age_init_rx_ring(struct age_softc *sc)
2139 {
2140 struct age_ring_data *rd;
2141 struct age_rxdesc *rxd;
2142 int i;
2143
2144 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2145 rd = &sc->age_rdata;
2146 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2147 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2148 rxd = &sc->age_cdata.age_rxdesc[i];
2149 rxd->rx_m = NULL;
2150 rxd->rx_desc = &rd->age_rx_ring[i];
2151 if (age_newbuf(sc, rxd, 1) != 0)
2152 return ENOBUFS;
2153 }
2154
2155 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2156 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2157
2158 return 0;
2159 }
2160
2161 static void
2162 age_init_rr_ring(struct age_softc *sc)
2163 {
2164 struct age_ring_data *rd;
2165
2166 sc->age_cdata.age_rr_cons = 0;
2167 AGE_RXCHAIN_RESET(sc);
2168
2169 rd = &sc->age_rdata;
2170 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2171 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2172 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2173 }
2174
2175 static void
2176 age_init_cmb_block(struct age_softc *sc)
2177 {
2178 struct age_ring_data *rd;
2179
2180 rd = &sc->age_rdata;
2181 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2182 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2183 sc->age_cdata.age_cmb_block_map->dm_mapsize,
2184 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2185 }
2186
2187 static void
2188 age_init_smb_block(struct age_softc *sc)
2189 {
2190 struct age_ring_data *rd;
2191
2192 rd = &sc->age_rdata;
2193 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2194 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2195 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2196 }
2197
2198 static int
2199 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2200 {
2201 struct rx_desc *desc;
2202 struct mbuf *m;
2203 bus_dmamap_t map;
2204 int error;
2205
2206 MGETHDR(m, M_DONTWAIT, MT_DATA);
2207 if (m == NULL)
2208 return ENOBUFS;
2209 MCLGET(m, M_DONTWAIT);
2210 if (!(m->m_flags & M_EXT)) {
2211 m_freem(m);
2212 return ENOBUFS;
2213 }
2214
2215 m->m_len = m->m_pkthdr.len = MCLBYTES;
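	/*
	 * Trimming ETHER_ALIGN (2) bytes off the front keeps the IP
	 * header 32-bit aligned behind the 14-byte Ethernet header.
	 */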
2216 m_adj(m, ETHER_ALIGN);
2217
2218 error = bus_dmamap_load_mbuf(sc->sc_dmat,
2219 sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2220
2221 if (error != 0) {
2222 m_freem(m);
2223
2224 if (init)
2225 printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2226 return error;
2227 }
2228
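	/*
	 * The new mbuf was loaded into the spare map above, so a load
	 * failure leaves the ring slot's old mbuf and mapping intact.
	 * Unload the old mapping, then swap the spare map into the slot.
	 */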
2229 if (rxd->rx_m != NULL) {
2230 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2231 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2232 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2233 }
2234 map = rxd->rx_dmamap;
2235 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2236 sc->age_cdata.age_rx_sparemap = map;
2237 rxd->rx_m = m;
2238
2239 desc = rxd->rx_desc;
2240 desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2241 desc->len =
2242 htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2243 AGE_RD_LEN_SHIFT);
2244
2245 return 0;
2246 }
2247
2248 static void
2249 age_rxvlan(struct age_softc *sc)
2250 {
2251 uint32_t reg;
2252
2253 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2254 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2255 if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2256 reg |= MAC_CFG_VLAN_TAG_STRIP;
2257 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2258 }
2259
2260 static void
2261 age_rxfilter(struct age_softc *sc)
2262 {
2263 struct ethercom *ec = &sc->sc_ec;
2264 struct ifnet *ifp = &sc->sc_ec.ec_if;
2265 struct ether_multi *enm;
2266 struct ether_multistep step;
2267 uint32_t crc;
2268 uint32_t mchash[2];
2269 uint32_t rxcfg;
2270
2271 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2272 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2273 ifp->if_flags &= ~IFF_ALLMULTI;
2274
2275 /*
2276 * Always accept broadcast frames.
2277 */
2278 rxcfg |= MAC_CFG_BCAST;
2279
2280 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2281 ifp->if_flags |= IFF_ALLMULTI;
2282 if (ifp->if_flags & IFF_PROMISC)
2283 rxcfg |= MAC_CFG_PROMISC;
2284 else
2285 rxcfg |= MAC_CFG_ALLMULTI;
2286 mchash[0] = mchash[1] = 0xFFFFFFFF;
2287 } else {
2288 /* Program new filter. */
2289 memset(mchash, 0, sizeof(mchash));
2290
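		/*
		 * The upper 6 bits of the little-endian CRC of each
		 * address pick one of the 64 hash filter bits: bit 31
		 * selects MAR0 or MAR1 and bits 30:26 select the bit
		 * within that register.
		 */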
2291 ETHER_LOCK(ec);
2292 ETHER_FIRST_MULTI(step, ec, enm);
2293 while (enm != NULL) {
2294 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2295 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2296 ETHER_NEXT_MULTI(step, enm);
2297 }
2298 ETHER_UNLOCK(ec);
2299 }
2300
2301 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2302 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2303 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2304 }
2305