/*	$NetBSD: if_age.c,v 1.28.2.6 2011/11/18 23:25:40 sborrill Exp $ */
/*	$OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $	*/

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari (at) FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.28.2.6 2011/11/18 23:25:40 sborrill Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int	age_match(device_t, cfdata_t, void *);
static void	age_attach(device_t, device_t, void *);
static int	age_detach(device_t, int);

static bool	age_resume(device_t PMF_FN_PROTO);

static int	age_miibus_readreg(device_t, int, int);
static void	age_miibus_writereg(device_t, int, int, int);
static void	age_miibus_statchg(device_t);

static int	age_init(struct ifnet *);
static int	age_ioctl(struct ifnet *, u_long, void *);
static void	age_start(struct ifnet *);
static void	age_watchdog(struct ifnet *);
static bool	age_shutdown(device_t, int);
static void	age_mediastatus(struct ifnet *, struct ifmediareq *);
static int	age_mediachange(struct ifnet *);

static int	age_intr(void *);
static int	age_dma_alloc(struct age_softc *);
static void	age_dma_free(struct age_softc *);
static void	age_get_macaddr(struct age_softc *, uint8_t[]);
static void	age_phy_reset(struct age_softc *);

static int	age_encap(struct age_softc *, struct mbuf **);
static void	age_init_tx_ring(struct age_softc *);
static int	age_init_rx_ring(struct age_softc *);
static void	age_init_rr_ring(struct age_softc *);
static void	age_init_cmb_block(struct age_softc *);
static void	age_init_smb_block(struct age_softc *);
static int	age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void	age_mac_config(struct age_softc *);
static void	age_txintr(struct age_softc *, int);
static void	age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void	age_rxintr(struct age_softc *, int);
static void	age_tick(void *);
static void	age_reset(struct age_softc *);
static void	age_stop(struct ifnet *, int);
static void	age_stats_update(struct age_softc *);
static void	age_stop_txmac(struct age_softc *);
static void	age_stop_rxmac(struct age_softc *);
static void	age_rxvlan(struct age_softc *sc);
static void	age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define DPRINTF(x)	do { if (agedebug) printf x; } while (0)

#define ETHER_ALIGN 2
#define AGE_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)
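
/*
 * Tx checksum offloads the chip handles; age_encap() translates these
 * mbuf flags into the AGE_TD_TCPCSUM/AGE_TD_UDPCSUM descriptor bits.
 */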

static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t memtype;
	int error = 0;

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA resources. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (!pmf_device_register1(self, NULL, age_resume, age_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return 0;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
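	/*
	 * Poll until the controller clears the EXECUTE/BUSY bits; each
	 * iteration delays 1us, so the wait is bounded by roughly
	 * AGE_PHY_TIMEOUT microseconds.
	 */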
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
static void
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * w/o having a carrier. Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	status = le32toh(cmb->intr_status);
	/* ACK/reenable interrupts */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
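	/*
	 * The chip coalesces interrupt status into the CMB, so keep
	 * consuming status words from there (rather than re-reading the
	 * register) until the hardware stops posting new events.
	 */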
	while ((status & AGE_INTRS) != 0) {
		sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
		    TPD_CONS_SHIFT;
		sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
		    RRD_PROD_SHIFT;

		/* Let hardware know CMB was served. */
		cmb->intr_status = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		if (ifp->if_flags & IFF_RUNNING) {
			if (status & INTR_CMB_RX)
				age_rxintr(sc, sc->age_rr_prod);

			if (status & INTR_CMB_TX)
				age_txintr(sc, sc->age_tpd_cons);

			if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
				if (status & INTR_DMA_RD_TO_RST)
					printf("%s: DMA read error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				if (status & INTR_DMA_WR_TO_RST)
					printf("%s: DMA write error! -- "
					    "resetting\n",
					    device_xname(sc->sc_dev));
				age_init(ifp);
			}

			age_start(ifp);

			if (status & INTR_SMB)
				age_stats_update(sc);
		}
		/* Check whether more interrupts have come in. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
		    sc->age_cdata.age_cmb_block_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		status = le32toh(cmb->intr_status);
	}

	return 1;
}

static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set Ethernet address of controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);
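
	/*
	 * The station address is stored most-significant byte first,
	 * spread across PAR1 (upper two bytes) and PAR0 (lower four).
	 */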
	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
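	/*
	 * Run the cable diagnostic on each wire pair; a pair that does
	 * not report "open" is presumably taken as evidence of a link
	 * partner.
	 */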
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;
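
	/*
	 * Each ring/block below follows the same four-step bus_dma(9)
	 * idiom: bus_dmamap_create() a map, bus_dmamem_alloc() physical
	 * memory, bus_dmamem_map() it into KVA, then bus_dmamap_load()
	 * to obtain the bus address that is programmed into the chip.
	 */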

	/*
	 * Create DMA resources for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);
	age_start(ifp);
}

static bool
age_shutdown(device_t self, int howto)
{
	struct age_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ec.ec_if;
	age_stop(ifp, 1);

	return true;
}


static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

static bool
age_resume(device_t dv PMF_FN_ARGS)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets during a resume event. From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

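	/*
	 * EFBIG means the mbuf chain has more segments than the map
	 * allows; compact the chain and retry the load once.
	 */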
	if (error == EFBIG) {
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	KASSERT(nsegs > 0);
	for (i = 0; ; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		if (i == (nsegs - 1))
			break;

		/* sync this descriptor and go to the next one */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_PREWRITE);
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Set EOP on the last descriptor and sync it. */
	desc->flags |= htole32(AGE_TD_EOP);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    prod * sizeof(struct tx_desc), sizeof(struct tx_desc),
	    BUS_DMASYNC_PREWRITE);

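	/*
	 * The mbuf is reclaimed from the EOP descriptor in age_txintr(),
	 * so the dmamap that actually holds the mapping must travel with
	 * the last descriptor's txd; hence the swap below.
	 */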
	if (nsegs > 1) {
		/* Swap dmamap of the first and the last. */
		txd = &sc->age_cdata.age_txdesc[prod];
		map = txd_last->tx_dmamap;
		txd_last->tx_dmamap = txd->tx_dmamap;
		txd->tx_dmamap = map;
		txd->tx_m = m;
		KASSERT(txd_last->tx_m == NULL);
	} else {
		KASSERT(txd_last == &sc->age_cdata.age_txdesc[prod]);
		txd_last->tx_m = m;
	}

	/* Update producer index. */
	AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	sc->age_cdata.age_tx_prod = prod;

	return 0;
}

static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;


	if (sc->age_cdata.age_tx_cnt <= 0) {
		if (ifp->if_timer != 0)
			printf("timer running without packets\n");
		if (sc->age_cdata.age_tx_cnt)
			printf("age_tx_cnt corrupted\n");
	}

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
		/*
		 * Clear the Tx descriptor; not strictly required, but it
		 * helps debugging in case of Tx issues.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
		    cons * sizeof(struct tx_desc), sizeof(struct tx_desc),
		    BUS_DMASYNC_POSTWRITE);
		txd->tx_desc->addr = 0;
		txd->tx_desc->len = 0;
		txd->tx_desc->flags = 0;

		if (txd->tx_m == NULL)
			continue;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	if (prog > 0) {
		sc->age_cdata.age_tx_cons = cons;

		/*
		 * Unarm watchdog timer only when there are no pending
		 * Tx descriptors in queue.
		 */
		if (sc->age_cdata.age_tx_cnt == 0)
			ifp->if_timer = 0;
	}
}

/* Receive a frame. */
static void
age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_rxdesc *rxd;
	struct rx_desc *desc;
	struct mbuf *mp, *m;
	uint32_t status, index;
	int count, nsegs, pktlen;
	int rx_cons;

	status = le32toh(rxrd->flags);
	index = le32toh(rxrd->index);
	rx_cons = AGE_RX_CONS(index);
	nsegs = AGE_RX_NSEGS(index);

	sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
	if ((status & AGE_RRD_ERROR) != 0 &&
	    (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
	    AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
		/*
		 * We want to pass the following frames to the upper
		 * layer regardless of the error status of the Rx
		 * return ring:
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol-specific length do
		 *    not match.
		 */
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
		return;
	}

	pktlen = 0;
	for (count = 0; count < nsegs; count++,
	    AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
		rxd = &sc->age_cdata.age_rxdesc[rx_cons];
		mp = rxd->rx_m;
		desc = rxd->rx_desc;
		/* Add a new receive buffer to the ring. */
		if (age_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->age_cdata.age_rxhead != NULL) {
				m_freem(sc->age_cdata.age_rxhead);
				AGE_RXCHAIN_RESET(sc);
			}
			break;
		}

		/* The length of the first mbuf is computed last. */
		if (count != 0) {
			mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
			pktlen += mp->m_len;
		}

		/* Chain received mbufs. */
		if (sc->age_cdata.age_rxhead == NULL) {
			sc->age_cdata.age_rxhead = mp;
			sc->age_cdata.age_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->age_cdata.age_rxprev_tail =
			    sc->age_cdata.age_rxtail;
			sc->age_cdata.age_rxtail->m_next = mp;
			sc->age_cdata.age_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/*
			 * It seems that the L1 controller has no way
			 * to tell the hardware to strip the CRC bytes.
			 */
			sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Remove the CRC bytes in chained mbufs. */
				pktlen -= ETHER_CRC_LEN;
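				/*
				 * If the last mbuf holds fewer bytes
				 * than the CRC, the CRC straddles two
				 * mbufs: drop the last mbuf and trim
				 * the rest of the CRC from the
				 * previous tail.
				 */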
				if (mp->m_len <= ETHER_CRC_LEN) {
					sc->age_cdata.age_rxtail =
					    sc->age_cdata.age_rxprev_tail;
					sc->age_cdata.age_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->age_cdata.age_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			}

			m = sc->age_cdata.age_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->age_cdata.age_rxlen;
			/* Set the first mbuf length. */
			m->m_len = sc->age_cdata.age_rxlen - pktlen;

			/*
			 * Set checksum information.
			 * It seems that L1 controller can compute partial
			 * checksum. The partial checksum value can be used
			 * to accelerate checksum computation for fragmented
			 * TCP/UDP packets. Upper network stack already
			 * takes advantage of the partial checksum value in
			 * IP reassembly stage. But I'm not sure the
			 * correctness of the partial hardware checksum
			 * assistance due to lack of data sheet. If it is
			 * proven to work on L1 I'll enable it.
			 */
			if (status & AGE_RRD_IPV4) {
				if (status & AGE_RRD_IPCSUM_NOK)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
				if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
				    (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
				}
				/*
				 * Don't mark a bad checksum for TCP/UDP
				 * frames, as fragmented frames may always
				 * have the bad-checksum bit set in the
				 * descriptor status.
				 */
			}
#if NVLAN > 0
			/* Check for VLAN tagged frames. */
			if (status & AGE_RRD_VLAN) {
				uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
				VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
				    continue);
			}
#endif

#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			/* Pass it on. */
			ether_input(ifp, m);

			/* Reset mbuf chains. */
			AGE_RXCHAIN_RESET(sc);
		}
	}

	if (count != nsegs) {
		sc->age_cdata.age_rx_cons += nsegs;
		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
	} else
		sc->age_cdata.age_rx_cons = rx_cons;
}

static void
age_rxintr(struct age_softc *sc, int rr_prod)
{
	struct rx_rdesc *rxrd;
	int rr_cons, nsegs, pktlen, prog;

	rr_cons = sc->age_cdata.age_rr_cons;
	if (rr_cons == rr_prod)
		return;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
	    sc->age_cdata.age_rr_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0; rr_cons != rr_prod; prog++) {
		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
		if (nsegs == 0)
			break;
		/*
		 * Check the number of segments against the received
		 * byte count.  A non-matching value would indicate that
		 * hardware is still trying to update the Rx return
		 * descriptors.  I'm not sure whether this check is
		 * really needed.
		 */
		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
		    (MCLBYTES - ETHER_ALIGN)))
			break;

		/* Received a frame. */
		age_rxeof(sc, rxrd);

		/* Clear return ring. */
		rxrd->index = 0;
		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->age_cdata.age_rr_cons = rr_cons;

		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
		    sc->age_cdata.age_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Notify hardware availability of new Rx buffers. */
		AGE_COMMIT_MBOX(sc);
	}
}

static void
age_tick(void *xsc)
{
	struct age_softc *sc = xsc;
	struct mii_data *mii = &sc->sc_miibus;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

static void
age_reset(struct age_softc *sc)
{
	uint32_t reg;
	int i;

	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
	CSR_READ_4(sc, AGE_MASTER_CFG);
	DELAY(1000);
	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
			break;
		DELAY(10);
	}

	if (i == 0)
		printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
		    reg);

	/* Initialize PCIe module. From Linux. */
	CSR_WRITE_4(sc, 0x12FC, 0x6500);
	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
}

static int
age_init(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, fsize;
	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	age_stop(ifp, 0);

	/*
	 * Reset the chip to a known state.
	 */
	age_reset(sc);

	/* Initialize descriptors. */
	error = age_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
		age_stop(ifp, 0);
		return error;
	}
	age_init_rr_ring(sc);
	age_init_tx_ring(sc);
	age_init_cmb_block(sc);
	age_init_smb_block(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, AGE_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);

	/* Set descriptor base addresses. */
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
	paddr = sc->age_rdata.age_rx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_rr_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_tx_ring_paddr;
	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_cmb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
	paddr = sc->age_rdata.age_smb_block_paddr;
	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
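	/*
	 * Note that only one high-address register (AGE_DESC_ADDR_HI)
	 * exists, so all rings and blocks are presumably assumed to
	 * share the same upper 32 address bits; only the low 32 bits
	 * are programmed per ring above.
	 */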

	/* Set Rx/Rx return descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
	    DESC_RRD_CNT_MASK) |
	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));

	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);

	/* Tell hardware that we're ready to load descriptors. */
	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);

	/*
	 * Initialize the mailbox register.
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * the Rx return consumer/Rx producer are all shared, so it's
	 * hard to separate the Tx and Rx code paths without locking.
	 * If the L1 hardware had separate mailbox registers for Tx and
	 * Rx consumer/producer management, we could have independent
	 * Tx/Rx handlers, and the Rx handler could then run without
	 * any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (agedebug)
		printf("%s: interrupt moderation is %d us.\n",
		    device_xname(sc->sc_dev), sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set max frame size; don't let the MTU be less than ETHERMTU. */
	if (ifp->if_mtu < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = ifp->if_mtu;
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);

	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand pause parameter relationships between FIFO
	 * size and number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
	 */
	switch (sc->age_chip_rev) {
	case 0x8001:
	case 0x9001:
	case 0x9002:
	case 0x9003:
		rxf_hi = AGE_RX_RING_CNT / 16;
		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
		rrd_lo = AGE_RR_RING_CNT / 16;
		break;
	default:
		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
		rxf_lo = reg / 16;
		if (rxf_lo < 192)
			rxf_lo = 192;
		rxf_hi = (reg * 7) / 8;
		if (rxf_hi < rxf_lo)
			rxf_hi = rxf_lo + 16;
		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
		rrd_lo = reg / 8;
		rrd_hi = (reg * 7) / 8;
		if (rrd_lo < 2)
			rrd_lo = 2;
		if (rrd_hi < rrd_lo)
			rrd_hi = rrd_lo + 3;
		break;
	}
	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
	    RXQ_RRD_PAUSE_THRESH_HI_MASK));

	/* Configure RxQ. */
	CSR_WRITE_4(sc, AGE_RXQ_CFG,
	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK) |
	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);

	/* Configure TxQ. */
	CSR_WRITE_4(sc, AGE_TXQ_CFG,
	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
	    TXQ_CFG_TPD_BURST_MASK) |
	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
	    TXQ_CFG_TX_FIFO_BURST_MASK) |
	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
	    TXQ_CFG_ENB);

	/* Configure DMA parameters. */
	CSR_WRITE_4(sc, AGE_DMA_CFG,
	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);

	/* Configure CMB DMA write threshold. */
	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
	    CMB_WR_THRESH_RRD_MASK) |
	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
	    CMB_WR_THRESH_TPD_MASK));

	/* Set CMB/SMB timer and enable them. */
	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));

	/* Request SMB updates every second. */
	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);

	/*
	 * Disable all WOL bits, as WOL can interfere with normal Rx
	 * operation.
	 */
	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 * Start with full-duplex/1000Mbps media.  The MAC is
	 * reconfigured once the link has been established.
	 */
1866 CSR_WRITE_4(sc, AGE_MAC_CFG,
1867 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1868 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1869 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1870 MAC_CFG_PREAMBLE_MASK));
1871
1872 /* Set up the receive filter. */
1873 age_rxfilter(sc);
1874 age_rxvlan(sc);
1875
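/* Enable Rx checksum offloading. */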
1876 reg = CSR_READ_4(sc, AGE_MAC_CFG);
1877 reg |= MAC_CFG_RXCSUM_ENB;
1878
1879 /* Ack all pending interrupts and clear them. */
1880 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1881 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1882
1883 /* Finally enable Tx/Rx MAC. */
1884 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1885
1886 sc->age_flags &= ~AGE_FLAG_LINK;
1887
1888 /* Switch to the current media. */
1889 mii = &sc->sc_miibus;
1890 mii_mediachg(mii);
1891
1892 callout_schedule(&sc->sc_tick_ch, hz);
1893
1894 ifp->if_flags |= IFF_RUNNING;
1895 ifp->if_flags &= ~IFF_OACTIVE;
1896
1897 return 0;
1898 }
1899
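/*
 * Stop the adapter: mark the interface down, cancel the watchdog and
 * tick callout, disable interrupts, halt the Rx/Tx MACs and both DMA
 * engines, and free any mbufs still held in the rings.
 */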
1900 static void
1901 age_stop(struct ifnet *ifp, int disable)
1902 {
1903 struct age_softc *sc = ifp->if_softc;
1904 struct age_txdesc *txd;
1905 struct age_rxdesc *rxd;
1906 uint32_t reg;
1907 int i;
1908
1909 callout_stop(&sc->sc_tick_ch);
1910
1911 /*
1912 * Mark the interface down and cancel the watchdog timer.
1913 */
1914 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1915 ifp->if_timer = 0;
1916
1917 sc->age_flags &= ~AGE_FLAG_LINK;
1918
1919 mii_down(&sc->sc_miibus);
1920
1921 /*
1922 * Disable interrupts.
1923 */
1924 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1925 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1926
1927 /* Stop CMB/SMB updates. */
1928 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1929
1930 /* Stop Rx/Tx MAC. */
1931 age_stop_rxmac(sc);
1932 age_stop_txmac(sc);
1933
1934 /* Stop DMA. */
1935 CSR_WRITE_4(sc, AGE_DMA_CFG,
1936 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1937
1938 /* Stop TxQ/RxQ. */
1939 CSR_WRITE_4(sc, AGE_TXQ_CFG,
1940 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1941 CSR_WRITE_4(sc, AGE_RXQ_CFG,
1942 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
1943 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1944 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1945 break;
1946 DELAY(10);
1947 }
1948 if (i == 0)
1949 printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1950 device_xname(sc->sc_dev), reg);
1951
1952 /* Reclaim Rx buffers that have been processed. */
1953 if (sc->age_cdata.age_rxhead != NULL)
1954 m_freem(sc->age_cdata.age_rxhead);
1955 AGE_RXCHAIN_RESET(sc);
1956
1957 /*
1958 * Free RX and TX mbufs still in the queues.
1959 */
1960 for (i = 0; i < AGE_RX_RING_CNT; i++) {
1961 rxd = &sc->age_cdata.age_rxdesc[i];
1962 if (rxd->rx_m != NULL) {
1963 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1964 m_freem(rxd->rx_m);
1965 rxd->rx_m = NULL;
1966 }
1967 }
1968 for (i = 0; i < AGE_TX_RING_CNT; i++) {
1969 txd = &sc->age_cdata.age_txdesc[i];
1970 if (txd->tx_m != NULL) {
1971 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1972 m_freem(txd->tx_m);
1973 txd->tx_m = NULL;
1974 }
1975 }
1976 }
1977
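/*
 * Fold the counters the hardware posted to the statistics message
 * block (SMB) into the driver totals and the ifnet counters.
 */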
1978 static void
1979 age_stats_update(struct age_softc *sc)
1980 {
1981 struct ifnet *ifp = &sc->sc_ec.ec_if;
1982 struct age_stats *stat;
1983 struct smb *smb;
1984
1985 stat = &sc->age_stat;
1986
1987 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1988 sc->age_cdata.age_smb_block_map->dm_mapsize,
1989 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1990
1991 smb = sc->age_rdata.age_smb_block;
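/* The chip sets 'updated' after posting fresh counters. */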
1992 if (smb->updated == 0)
1993 return;
1994
1995 /* Rx stats. */
1996 stat->rx_frames += smb->rx_frames;
1997 stat->rx_bcast_frames += smb->rx_bcast_frames;
1998 stat->rx_mcast_frames += smb->rx_mcast_frames;
1999 stat->rx_pause_frames += smb->rx_pause_frames;
2000 stat->rx_control_frames += smb->rx_control_frames;
2001 stat->rx_crcerrs += smb->rx_crcerrs;
2002 stat->rx_lenerrs += smb->rx_lenerrs;
2003 stat->rx_bytes += smb->rx_bytes;
2004 stat->rx_runts += smb->rx_runts;
2005 stat->rx_fragments += smb->rx_fragments;
2006 stat->rx_pkts_64 += smb->rx_pkts_64;
2007 stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
2008 stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
2009 stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
2010 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
2011 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
2012 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
2013 stat->rx_pkts_truncated += smb->rx_pkts_truncated;
2014 stat->rx_fifo_oflows += smb->rx_fifo_oflows;
2015 stat->rx_desc_oflows += smb->rx_desc_oflows;
2016 stat->rx_alignerrs += smb->rx_alignerrs;
2017 stat->rx_bcast_bytes += smb->rx_bcast_bytes;
2018 stat->rx_mcast_bytes += smb->rx_mcast_bytes;
2019 stat->rx_pkts_filtered += smb->rx_pkts_filtered;
2020
2021 /* Tx stats. */
2022 stat->tx_frames += smb->tx_frames;
2023 stat->tx_bcast_frames += smb->tx_bcast_frames;
2024 stat->tx_mcast_frames += smb->tx_mcast_frames;
2025 stat->tx_pause_frames += smb->tx_pause_frames;
2026 stat->tx_excess_defer += smb->tx_excess_defer;
2027 stat->tx_control_frames += smb->tx_control_frames;
2028 stat->tx_deferred += smb->tx_deferred;
2029 stat->tx_bytes += smb->tx_bytes;
2030 stat->tx_pkts_64 += smb->tx_pkts_64;
2031 stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2032 stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2033 stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2034 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2035 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2036 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2037 stat->tx_single_colls += smb->tx_single_colls;
2038 stat->tx_multi_colls += smb->tx_multi_colls;
2039 stat->tx_late_colls += smb->tx_late_colls;
2040 stat->tx_excess_colls += smb->tx_excess_colls;
2041 stat->tx_underrun += smb->tx_underrun;
2042 stat->tx_desc_underrun += smb->tx_desc_underrun;
2043 stat->tx_lenerrs += smb->tx_lenerrs;
2044 stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2045 stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2046 stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2047
2048 /* Update counters in ifnet. */
2049 ifp->if_opackets += smb->tx_frames;
2050
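/*
 * A frame dropped for excessive collisions collided on every
 * attempt, so estimate a full retry burst (HDPX_CFG_RETRY_DEFAULT
 * collisions) for each such frame.
 */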
2051 ifp->if_collisions += smb->tx_single_colls +
2052 smb->tx_multi_colls + smb->tx_late_colls +
2053 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2054
2055 ifp->if_oerrors += smb->tx_excess_colls +
2056 smb->tx_late_colls + smb->tx_underrun +
2057 smb->tx_pkts_truncated;
2058
2059 ifp->if_ipackets += smb->rx_frames;
2060
2061 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2062 smb->rx_runts + smb->rx_pkts_truncated +
2063 smb->rx_fifo_oflows + smb->rx_desc_oflows +
2064 smb->rx_alignerrs;
2065
2066 /* Update done, clear. */
2067 smb->updated = 0;
2068
2069 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2070 sc->age_cdata.age_smb_block_map->dm_mapsize,
2071 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2072 }
2073
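/*
 * Disable the Tx MAC and the read side of the DMA engine, then
 * wait for both to report idle.
 */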
2074 static void
2075 age_stop_txmac(struct age_softc *sc)
2076 {
2077 uint32_t reg;
2078 int i;
2079
2080 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2081 if ((reg & MAC_CFG_TX_ENB) != 0) {
2082 reg &= ~MAC_CFG_TX_ENB;
2083 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2084 }
2085 /* Stop Tx DMA engine. */
2086 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2087 if ((reg & DMA_CFG_RD_ENB) != 0) {
2088 reg &= ~DMA_CFG_RD_ENB;
2089 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2090 }
2091 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2092 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2093 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2094 break;
2095 DELAY(10);
2096 }
2097 if (i == 0)
2098 printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2099 }
2100
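/*
 * Disable the Rx MAC and the write side of the DMA engine, then
 * wait for both to report idle.
 */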
2101 static void
2102 age_stop_rxmac(struct age_softc *sc)
2103 {
2104 uint32_t reg;
2105 int i;
2106
2107 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2108 if ((reg & MAC_CFG_RX_ENB) != 0) {
2109 reg &= ~MAC_CFG_RX_ENB;
2110 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2111 }
2112 /* Stop Rx DMA engine. */
2113 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2114 if ((reg & DMA_CFG_WR_ENB) != 0) {
2115 reg &= ~DMA_CFG_WR_ENB;
2116 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2117 }
2118 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2119 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2120 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2121 break;
2122 DELAY(10);
2123 }
2124 if (i == 0)
2125 printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2126 }
2127
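/*
 * Reset the Tx ring: clear the producer/consumer indices, zero all
 * descriptors and detach any stale mbuf pointers.
 */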
2128 static void
2129 age_init_tx_ring(struct age_softc *sc)
2130 {
2131 struct age_ring_data *rd;
2132 struct age_txdesc *txd;
2133 int i;
2134
2135 sc->age_cdata.age_tx_prod = 0;
2136 sc->age_cdata.age_tx_cons = 0;
2137 sc->age_cdata.age_tx_cnt = 0;
2138
2139 rd = &sc->age_rdata;
2140 memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2141 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2142 txd = &sc->age_cdata.age_txdesc[i];
2143 txd->tx_desc = &rd->age_tx_ring[i];
2144 txd->tx_m = NULL;
2145 }
2146 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2147 sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2148 }
2149
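/*
 * Initialize the Rx ring and preload an mbuf cluster into every
 * slot; returns ENOBUFS if an mbuf or cluster cannot be allocated.
 */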
2150 static int
2151 age_init_rx_ring(struct age_softc *sc)
2152 {
2153 struct age_ring_data *rd;
2154 struct age_rxdesc *rxd;
2155 int i;
2156
2157 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2158 rd = &sc->age_rdata;
2159 memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2160 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2161 rxd = &sc->age_cdata.age_rxdesc[i];
2162 rxd->rx_m = NULL;
2163 rxd->rx_desc = &rd->age_rx_ring[i];
2164 if (age_newbuf(sc, rxd, 1) != 0)
2165 return ENOBUFS;
2166 }
2167
2168 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2169 sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2170
2171 return 0;
2172 }
2173
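/*
 * Reset the Rx return ring, through which the chip hands received
 * frames back to the driver.
 */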
2174 static void
2175 age_init_rr_ring(struct age_softc *sc)
2176 {
2177 struct age_ring_data *rd;
2178
2179 sc->age_cdata.age_rr_cons = 0;
2180 AGE_RXCHAIN_RESET(sc);
2181
2182 rd = &sc->age_rdata;
2183 memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2184 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2185 sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2186 }
2187
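/* Clear the coalescing message block (CMB) the chip writes status to. */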
2188 static void
2189 age_init_cmb_block(struct age_softc *sc)
2190 {
2191 struct age_ring_data *rd;
2192
2193 rd = &sc->age_rdata;
2194 memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2195 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2196 sc->age_cdata.age_cmb_block_map->dm_mapsize,
2197 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2198 }
2199
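/* Clear the statistics message block (SMB). */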
2200 static void
2201 age_init_smb_block(struct age_softc *sc)
2202 {
2203 struct age_ring_data *rd;
2204
2205 rd = &sc->age_rdata;
2206 memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2207 bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2208 sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2209 }
2210
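/*
 * Attach a fresh mbuf cluster to an Rx descriptor.  The spare DMA
 * map is loaded first, so the currently attached mbuf survives an
 * allocation or mapping failure.
 */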
2211 static int
2212 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2213 {
2214 struct rx_desc *desc;
2215 struct mbuf *m;
2216 bus_dmamap_t map;
2217 int error;
2218
2219 MGETHDR(m, M_DONTWAIT, MT_DATA);
2220 if (m == NULL)
2221 return ENOBUFS;
2222 MCLGET(m, M_DONTWAIT);
2223 if (!(m->m_flags & M_EXT)) {
2224 m_freem(m);
2225 return ENOBUFS;
2226 }
2227
2228 m->m_len = m->m_pkthdr.len = MCLBYTES;
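/* Trim ETHER_ALIGN bytes so the IP header lands 32-bit aligned. */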
2229 m_adj(m, ETHER_ALIGN);
2230
2231 error = bus_dmamap_load_mbuf(sc->sc_dmat,
2232 sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2233
2234 if (error != 0) {
/* A failed load leaves nothing mapped; just drop the mbuf. */
2242 m_freem(m);
2243
2244 if (init)
2245 printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2246 return error;
2247 }
2248
2249 if (rxd->rx_m != NULL) {
2250 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2251 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2252 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2253 }
2254 map = rxd->rx_dmamap;
2255 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2256 sc->age_cdata.age_rx_sparemap = map;
2257 rxd->rx_m = m;
2258
2259 desc = rxd->rx_desc;
2260 desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2261 desc->len =
2262 htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2263 AGE_RD_LEN_SHIFT);
2264
2265 return 0;
2266 }
2267
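/*
 * Enable or disable hardware VLAN tag stripping according to the
 * current ETHERCAP_VLAN_HWTAGGING setting.
 */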
2268 static void
2269 age_rxvlan(struct age_softc *sc)
2270 {
2271 uint32_t reg;
2272
2273 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2274 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2275 if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
2276 reg |= MAC_CFG_VLAN_TAG_STRIP;
2277 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2278 }
2279
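/*
 * Program the Rx filter: always accept broadcast; go promiscuous or
 * accept-all-multicast when asked to (or when any multicast group is
 * joined); otherwise program the 64-bit multicast hash registers.
 */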
2280 static void
2281 age_rxfilter(struct age_softc *sc)
2282 {
2283 struct ethercom *ec = &sc->sc_ec;
2284 struct ifnet *ifp = &sc->sc_ec.ec_if;
2285 struct ether_multi *enm;
2286 struct ether_multistep step;
2287 uint32_t crc;
2288 uint32_t mchash[2];
2289 uint32_t rxcfg;
2290
2291 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2292 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2293 ifp->if_flags &= ~IFF_ALLMULTI;
2294
2295 /*
2296 * Always accept broadcast frames.
2297 */
2298 rxcfg |= MAC_CFG_BCAST;
2299
2300 if ((ifp->if_flags & IFF_PROMISC) != 0 || ec->ec_multicnt > 0) {
2301 ifp->if_flags |= IFF_ALLMULTI;
2302 if (ifp->if_flags & IFF_PROMISC)
2303 rxcfg |= MAC_CFG_PROMISC;
2304 else
2305 rxcfg |= MAC_CFG_ALLMULTI;
2306 mchash[0] = mchash[1] = 0xFFFFFFFF;
2307 } else {
2308 /* Program new filter. */
2309 memset(mchash, 0, sizeof(mchash));
2310
2311 ETHER_FIRST_MULTI(step, ec, enm);
2312 while (enm != NULL) {
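/*
 * Bit 31 of the little-endian CRC selects MAR0 or MAR1; bits
 * 26-30 select the bit within that register.
 */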
2313 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2314 mchash[crc >> 31] |= 1U << ((crc >> 26) & 0x1f);
2315 ETHER_NEXT_MULTI(step, enm);
2316 }
2317 }
2318
2319 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2320 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2321 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2322 }
2323