/*	$OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $	*/
/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x PCIe Ethernet. */

#ifdef _KERNEL_OPT
#include "vlan.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/module.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_alcreg.h>

/*
 * Devices supported by this driver.
 */
static struct alc_ident alc_ident_table[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
	    "Atheros AR8132 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
	    "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
	    "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
	    "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
	    "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ 0, 0, 0, NULL },
};

static int	alc_match(device_t, cfdata_t, void *);
static void	alc_attach(device_t, device_t, void *);
static int	alc_detach(device_t, int);

static int	alc_init(struct ifnet *);
static void	alc_start(struct ifnet *);
static int	alc_ioctl(struct ifnet *, u_long, void *);
static void	alc_watchdog(struct ifnet *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

static void	alc_aspm(struct alc_softc *, int);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(struct pci_attach_args *);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static int	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static void	alc_miibus_writereg(device_t, int, int, int);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, int);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_iff(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct ifnet *, int);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);

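/*
 * Byte counts for the PCIe maximum read request / payload size
 * encodings (0-6) extracted from PCIe config space below; the
 * trailing zero terminates the table.
 */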
uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
    alc_match, alc_attach, alc_detach, NULL);

int alcdebug = 0;
#define	DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

#define	ETHER_ALIGN		2
#define	ALC_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)

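/*
 * PHY registers are reached through the chip's MDIO mailbox: a command
 * word is written to ALC_MDIO and the driver then busy-waits, for up to
 * ALC_PHY_TIMEOUT polls of 5us each, until the EXECUTE/BUSY bits clear.
 */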
static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static void
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->alc_phyaddr)
		return;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
}

static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;
	uint32_t reg;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
		alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static struct alc_ident *
alc_find_ident(struct pci_attach_args *pa)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = PCI_VENDOR(pa->pa_id);
	devid = PCI_PRODUCT(pa->pa_id);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
alc_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return alc_find_ident(pa) != NULL;
}

static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n",
			    device_xname(sc->sc_dev));
	}
	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

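	/*
	 * The station address is kept in the PAR0/PAR1 registers, with
	 * the two most significant bytes in PAR1; unpack it one byte
	 * at a time.
	 */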
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	/* Another magic from vendor. */
	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
	    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
	    PM_CFG_SERDES_PD_EX_L1);
	pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_L1_ENB;
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_phy_reset(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);
}

static void
alc_phy_down(struct alc_softc *sc)
{
	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When the driver was reloaded after a GPHY power down,
		 * accesses to PHY/MAC registers hung the system and only
		 * a cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this, as I don't have an
		 * AR8151 v1.0 controller in hand. The only option left
		 * is to isolate the PHY and initiate a PHY power down,
		 * which in turn saves more power while the driver is
		 * unloaded.
		 */
		alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG,
		    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

static void
alc_aspm(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
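		/* (0x80 is the Extended Synch bit of PCIe Link Control.) */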
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    PCI_PRODUCT_ATTANSIC_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case PCI_PRODUCT_ATTANSIC_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_AR8152_B2:
				case PCI_PRODUCT_ATTANSIC_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

646
647 static void
648 alc_attach(device_t parent, device_t self, void *aux)
649 {
650
651 struct alc_softc *sc = device_private(self);
652 struct pci_attach_args *pa = aux;
653 pci_chipset_tag_t pc = pa->pa_pc;
654 pci_intr_handle_t ih;
655 const char *intrstr;
656 struct ifnet *ifp;
657 pcireg_t memtype;
658 const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
659 uint16_t burst;
660 int base, mii_flags, state, error = 0;
661 uint32_t cap, ctl, val;
662
663 sc->alc_ident = alc_find_ident(pa);
664
665 aprint_naive("\n");
666 aprint_normal(": %s\n", sc->alc_ident->name);
667
668 sc->sc_dev = self;
669 sc->sc_dmat = pa->pa_dmat;
670 sc->sc_pct = pa->pa_pc;
671 sc->sc_pcitag = pa->pa_tag;
672
673 /*
674 * Allocate IO memory
675 */
676 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
677 switch (memtype) {
678 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
679 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
680 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
681 break;
682 default:
683 aprint_error_dev(self, "invalid base address register\n");
684 break;
685 }
686
687 if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
688 &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
689 aprint_error_dev(self, "could not map mem space\n");
690 return;
691 }
692
693 if (pci_intr_map(pa, &ih) != 0) {
694 printf(": can't map interrupt\n");
695 goto fail;
696 }
697
698 /*
699 * Allocate IRQ
700 */
701 intrstr = pci_intr_string(sc->sc_pct, ih);
702 sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc);
703 if (sc->sc_irq_handle == NULL) {
704 printf(": could not establish interrupt");
705 if (intrstr != NULL)
706 printf(" at %s", intrstr);
707 printf("\n");
708 goto fail;
709 }
710 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
711
712 /* Set PHY address. */
713 sc->alc_phyaddr = ALC_PHY_ADDR;
714
715 /* Initialize DMA parameters. */
716 sc->alc_dma_rd_burst = 0;
717 sc->alc_dma_wr_burst = 0;
718 sc->alc_rcb = DMA_CFG_RCB_64;
719 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
720 &base, NULL)) {
721 sc->alc_flags |= ALC_FLAG_PCIE;
722 sc->alc_expcap = base;
723 burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
724 base + PCI_PCIE_DCSR) >> 16;
725 sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
726 sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
727 if (alcdebug) {
728 printf("%s: Read request size : %u bytes.\n",
729 device_xname(sc->sc_dev),
730 alc_dma_burst[sc->alc_dma_rd_burst]);
731 printf("%s: TLP payload size : %u bytes.\n",
732 device_xname(sc->sc_dev),
733 alc_dma_burst[sc->alc_dma_wr_burst]);
734 }
735 /* Clear data link and flow-control protocol error. */
736 val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
737 val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
738 CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
739 CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
740 CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
741 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
742 CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
743 PCIE_PHYMISC_FORCE_RCV_DET);
744 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
745 sc->alc_rev == ATHEROS_AR8152_B_V10) {
746 val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
747 val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
748 PCIE_PHYMISC2_SERDES_TH_MASK);
749 val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
750 val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
751 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
752 }
753 /* Disable ASPM L0S and L1. */
754 cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
755 base + PCI_PCIE_LCAP) >> 16;
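		/*
		 * 0x00000c00 masks the advertised ASPM support field;
		 * the low two bits of the link control word read below
		 * indicate which ASPM states are currently enabled.
		 */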
		if ((cap & 0x00000c00) != 0) {
			ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
			    base + PCI_PCIE_LCSR) >> 16;
			if ((ctl & 0x08) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (alcdebug)
				printf("%s: RCB %u bytes\n",
				    device_xname(sc->sc_dev),
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & 0x03;
			if (state & 0x01)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & 0x02)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (alcdebug)
				printf("%s: ASPM %s %s\n",
				    device_xname(sc->sc_dev),
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
		}
	}

	/* Reset PHY. */
	alc_phy_reset(sc);

	/* Reset the ethernet controller. */
	alc_reset(sc);

	/*
	 * One odd thing is that the AR8132 uses the same PHY hardware
	 * (F1 gigabit PHY) as the AR8131. So atphy(4) reports that the
	 * AR8132's PHY supports 1000Mbps, but that's not true: the PHY
	 * used in the AR8132 can't establish a gigabit link even though
	 * it shows the same PHY model/revision number as the AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case PCI_PRODUCT_ATTANSIC_AR8152_B:
	case PCI_PRODUCT_ATTANSIC_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case PCI_PRODUCT_ATTANSIC_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case PCI_PRODUCT_ATTANSIC_AR8151:
	case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;

	/*
	 * It seems that AR813x/AR815x has a silicon bug in the SMB. In
	 * addition, Atheros said that enabling the SMB wouldn't improve
	 * performance. However, I think it's bad to access lots of
	 * registers just to extract the MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use the Tx CMB. It is known to have a silicon bug.
	 */
	sc->alc_flags |= ALC_FLAG_CMB_BUG;
	sc->alc_rev = PCI_REVISION(pa->pa_class);
	sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (alcdebug) {
		printf("%s: PCI device revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_rev);
		printf("%s: Chip id/revision : 0x%04x\n",
		    device_xname(sc->sc_dev), sc->alc_chip_rev);
		printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
		    CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
		    CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
	}

	error = alc_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);

	/* Load station address. */
	alc_get_macaddr(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->alc_eaddr));

	ifp = &sc->sc_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = alc_init;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_stop = alc_stop;
	ifp->if_watchdog = alc_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef ALC_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = alc_miibus_readreg;
	sc->sc_miibus.mii_writereg = alc_miibus_writereg;
	sc->sc_miibus.mii_statchg = alc_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
	    alc_mediastatus);
	mii_flags = 0;
	if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
		mii_flags |= MIIF_DOPAUSE;
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->alc_eaddr);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;
fail:
	alc_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

static int
alc_detach(device_t self, int flags)
{
	struct alc_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	s = splnet();
	alc_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return (0);
}

static int
alc_dma_alloc(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int nsegs, error, i;

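	/*
	 * Each DMA area below follows the same four-step bus_dma(9)
	 * recipe: bus_dmamap_create(), bus_dmamem_alloc(),
	 * bus_dmamem_map() and bus_dmamap_load(), after which the
	 * loaded physical address is recorded for the chip.
	 */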
	/*
	 * Create DMA stuffs for TX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
	    ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
	if (error) {
		sc->alc_cdata.alc_tx_ring_map = NULL;
		return (ENOBUFS);
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
	    nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
	    sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_tx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_tx_ring_paddr =
	    sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
	    ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
	    nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
	    sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring.\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rx_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_rx_ring_paddr =
	    sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for RX return ring
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
	    ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring.\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
	    nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
	    sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring."
		    "\n", device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rr_ring_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_rr_ring_paddr =
	    sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for CMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
	    ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_cmb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for CMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
	    nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
	    sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_cmb_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_cmb_paddr =
	    sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA stuffs for SMB block
	 */
	error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
	    ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->alc_cdata.alc_smb_map);
	if (error)
		return (ENOBUFS);

	/* Allocate DMA'able memory for SMB block */
	error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
	    ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block\n", device_xname(sc->sc_dev));
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
	    nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
	    BUS_DMA_NOWAIT);
	if (error)
		return (ENOBUFS);

	/* Load the DMA map for SMB block */
	error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
	    sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block\n",
		    device_xname(sc->sc_dev));
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_smb_seg, 1);
		return error;
	}

	sc->alc_rdata.alc_smb_paddr =
	    sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
		    ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			printf("%s: could not create Tx dmamap.\n",
			    device_xname(sc->sc_dev));
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
	if (error) {
		printf("%s: could not create spare Rx dmamap.\n",
		    device_xname(sc->sc_dev));
		return error;
	}

	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			printf("%s: could not create Rx dmamap.\n",
			    device_xname(sc->sc_dev));
			return error;
		}
	}

	return (0);
}

static void
alc_dma_free(struct alc_softc *sc)
{
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->alc_cdata.alc_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
		sc->alc_cdata.alc_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->alc_cdata.alc_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
	if (sc->alc_cdata.alc_tx_ring_map != NULL &&
	    sc->alc_rdata.alc_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_tx_ring_seg, 1);
	sc->alc_rdata.alc_tx_ring = NULL;
	sc->alc_cdata.alc_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->alc_cdata.alc_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
	if (sc->alc_cdata.alc_rx_ring_map != NULL &&
	    sc->alc_rdata.alc_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rx_ring_seg, 1);
	sc->alc_rdata.alc_rx_ring = NULL;
	sc->alc_cdata.alc_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->alc_cdata.alc_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
	if (sc->alc_cdata.alc_rr_ring_map != NULL &&
	    sc->alc_rdata.alc_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_rr_ring_seg, 1);
	sc->alc_rdata.alc_rr_ring = NULL;
	sc->alc_cdata.alc_rr_ring_map = NULL;

	/* CMB block */
	if (sc->alc_cdata.alc_cmb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
	if (sc->alc_cdata.alc_cmb_map != NULL &&
	    sc->alc_rdata.alc_cmb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_cmb_seg, 1);
	sc->alc_rdata.alc_cmb = NULL;
	sc->alc_cdata.alc_cmb_map = NULL;

	/* SMB block */
	if (sc->alc_cdata.alc_smb_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
	if (sc->alc_cdata.alc_smb_map != NULL &&
	    sc->alc_rdata.alc_smb != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->alc_rdata.alc_smb_seg, 1);
	sc->alc_rdata.alc_smb = NULL;
	sc->alc_cdata.alc_smb_map = NULL;
}

static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, idx, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
	    BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n",
		    device_xname(sc->sc_dev));
		return (error);
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	m = *m_head;
	desc = NULL;
	idx = 0;
#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = htons(VLAN_TAG_VALUE(mtag));
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
#endif
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
	}
	for (; idx < nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len =
		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(map->dm_segs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/* Swap dmamap of the first and the last. */
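	/*
	 * The mbuf chain was loaded into the first slot's map, but the
	 * mbuf pointer is recorded on the last (EOP) slot, so swap the
	 * two maps to keep the loaded map and its mbuf together for
	 * reclamation in alc_txeof().
	 */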
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

static void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (alc_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (enq) {
		/* Sync descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
		    (sc->alc_cdata.alc_tx_prod <<
		    MBOX_TD_PROD_LO_IDX_SHIFT) &
		    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALC_TX_TIMEOUT;
	}
}

static void
alc_watchdog(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;

	if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		alc_init(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	alc_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		alc_start(ifp);
}

static int
alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct alc_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			alc_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

static void
alc_mac_config(struct alc_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
	    MAC_CFG_SPEED_MASK);
	if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
	    sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
		reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
}

static void
alc_stats_clear(struct alc_softc *sc)
{
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		smb = sc->alc_rdata.alc_smb;
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	} else {
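		/*
		 * The MIB counters are clear-on-read; walk them and
		 * discard the values. The local sb structure is used
		 * only to pace the walk over the register file.
		 */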
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}
}

static void
alc_stats_update(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		smb = sc->alc_rdata.alc_smb;
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;

	/*
	 * XXX
	 * The tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate that
	 * the counter name is not the correct one, so I've removed the
	 * counter from the output errors.
	 */
	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	}
}

static int
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	if ((status & ALC_INTRS) == 0)
		return (0);

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT) {
			int error;

			error = alc_rxintr(sc);
			if (error) {
				alc_init(ifp);
				return (0);
			}
		}

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_TXQ_TO_RST)
				printf("%s: TxQ reset! -- resetting\n",
				    device_xname(sc->sc_dev));
			alc_init(ifp);
			return (0);
		}

		alc_txeof(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			alc_start(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	return (1);
}

static void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else
		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
	/* Assume we're using normal Tx priority queue. */
	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
	    MBOX_TD_CONS_LO_IDX_SHIFT;
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod;
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm the watchdog timer only when there are no pending
	 * frames in the Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		ifp->if_timer = 0;
}

static int
alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, int init)
{
	struct mbuf *m;
	bus_dmamap_t map;
	int error;

	MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
	if (!(m->m_flags & M_EXT)) {
		m_freem(m);
		return (ENOBUFS);
	}

	m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;

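	/*
	 * Load the new mbuf into the spare map first so that, on
	 * failure, the descriptor keeps its previously loaded mbuf
	 * and map; the maps are swapped only once the load succeeds.
	 */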
	error = bus_dmamap_load_mbuf(sc->sc_dmat,
	    sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);

	if (error != 0) {
		m_freem(m);

		if (init)
			printf("%s: can't load RX mbuf\n",
			    device_xname(sc->sc_dev));

		return (error);
	}
1806
1807 if (rxd->rx_m != NULL) {
1808 bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1809 rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1810 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1811 }
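	/*
	 * Swap the just-loaded spare map into the descriptor and keep
	 * the old map as the new spare; the previous buffer would
	 * still have been mapped had the load above failed.
	 */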
1812 map = rxd->rx_dmamap;
1813 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
1814 sc->alc_cdata.alc_rx_sparemap = map;
1815 rxd->rx_m = m;
1816 rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
1817 return (0);
1818 }
1819
1820 static int
1821 alc_rxintr(struct alc_softc *sc)
1822 {
1823 struct ifnet *ifp = &sc->sc_ec.ec_if;
1824 struct rx_rdesc *rrd;
1825 uint32_t nsegs, status;
1826 int rr_cons, prog;
1827
1828 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1829 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1830 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1831 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1832 rr_cons = sc->alc_cdata.alc_rr_cons;
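	/*
	 * The hardware sets RRD_VALID in a return descriptor once the
	 * frame it describes is complete; the loop below consumes
	 * entries until it reaches one still owned by the hardware,
	 * clearing each status word so the slot reads as free on the
	 * next pass.
	 */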
1833 for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
1834 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
1835 status = le32toh(rrd->status);
1836 if ((status & RRD_VALID) == 0)
1837 break;
1838 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
1839 if (nsegs == 0) {
1840 /* This should not happen! */
1841 if (alcdebug)
1842 printf("%s: unexpected segment count -- "
1843 "resetting\n", device_xname(sc->sc_dev));
1844 return (EIO);
1845 }
1846 alc_rxeof(sc, rrd);
1847 /* Clear Rx return status. */
1848 rrd->status = 0;
1849 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
1850 sc->alc_cdata.alc_rx_cons += nsegs;
1851 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
1852 prog += nsegs;
1853 }
1854
1855 if (prog > 0) {
1856 /* Update the consumer index. */
1857 sc->alc_cdata.alc_rr_cons = rr_cons;
1858 /* Sync Rx return descriptors. */
1859 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1860 sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
1861 BUS_DMASYNC_PREWRITE);
1862 		/*
1863 		 * Sync updated Rx descriptors so that the controller
1864 		 * sees the modified buffer addresses.
1865 		 */
1866 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1867 sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
1868 BUS_DMASYNC_PREWRITE);
1869 		/*
1870 		 * Let the controller know about the availability of new
1871 		 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
1872 		 * descriptors, it may be possible to update
1873 		 * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching
1874 		 * is required. In addition, ALC_RX_RD_FREE_THRESH is
1875 		 * already set to RX_RD_FREE_THRESH_LO_DEFAULT
1876 		 * descriptors. However, pre-fetching still seems to need
1877 		 * more experimentation.
1878 		 */
1879 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
1880 sc->alc_cdata.alc_rx_cons);
1881 }
1882
1883 return (0);
1884 }
1885
1886 /* Receive a frame. */
1887 static void
1888 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
1889 {
1890 struct ifnet *ifp = &sc->sc_ec.ec_if;
1891 struct alc_rxdesc *rxd;
1892 struct mbuf *mp, *m;
1893 uint32_t rdinfo, status;
1894 int count, nsegs, rx_cons;
1895
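	/*
	 * rdinfo packs both the index of the first Rx descriptor used
	 * for this frame (RRD_RD_IDX) and the number of descriptors
	 * the hardware consumed for it (RRD_RD_CNT).
	 */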
1896 status = le32toh(rrd->status);
1897 rdinfo = le32toh(rrd->rdinfo);
1898 rx_cons = RRD_RD_IDX(rdinfo);
1899 nsegs = RRD_RD_CNT(rdinfo);
1900
1901 sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
1902 if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
1903 		/*
1904 		 * We want to pass the following frames to the upper
1905 		 * layer regardless of the error status of the Rx
1906 		 * return ring:
1907 		 *
1908 		 *  o IP/TCP/UDP checksum is bad.
1909 		 *  o frame length and protocol specific length
1910 		 *    do not match.
1911 		 *
1912 		 * Force the network stack to compute checksums for
1913 		 * these errored frames.
1914 		 */
1915 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
1916 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
1917 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
1918 return;
1919 }
1920
1921 for (count = 0; count < nsegs; count++,
1922 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
1923 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
1924 mp = rxd->rx_m;
1925 /* Add a new receive buffer to the ring. */
1926 if (alc_newbuf(sc, rxd, 0) != 0) {
1927 ifp->if_iqdrops++;
1928 /* Reuse Rx buffers. */
1929 if (sc->alc_cdata.alc_rxhead != NULL)
1930 m_freem(sc->alc_cdata.alc_rxhead);
1931 break;
1932 }
1933
1934 		/*
1935 		 * Assume we've received a full sized frame.
1936 		 * The actual size is fixed when we encounter the end
1937 		 * of a multi-segment frame.
1938 		 */
1939 mp->m_len = sc->alc_buf_size;
1940
1941 /* Chain received mbufs. */
1942 if (sc->alc_cdata.alc_rxhead == NULL) {
1943 sc->alc_cdata.alc_rxhead = mp;
1944 sc->alc_cdata.alc_rxtail = mp;
1945 } else {
1946 mp->m_flags &= ~M_PKTHDR;
1947 sc->alc_cdata.alc_rxprev_tail =
1948 sc->alc_cdata.alc_rxtail;
1949 sc->alc_cdata.alc_rxtail->m_next = mp;
1950 sc->alc_cdata.alc_rxtail = mp;
1951 }
1952
1953 if (count == nsegs - 1) {
1954 /* Last desc. for this frame. */
1955 m = sc->alc_cdata.alc_rxhead;
1956 m->m_flags |= M_PKTHDR;
1957 			/*
1958 			 * It seems that the L1C/L2C controller has no
1959 			 * way to tell the hardware to strip CRC bytes.
1960 			 */
1961 m->m_pkthdr.len =
1962 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
1963 if (nsegs > 1) {
1964 /* Set last mbuf size. */
1965 mp->m_len = sc->alc_cdata.alc_rxlen -
1966 (nsegs - 1) * sc->alc_buf_size;
1967 /* Remove the CRC bytes in chained mbufs. */
1968 if (mp->m_len <= ETHER_CRC_LEN) {
1969 sc->alc_cdata.alc_rxtail =
1970 sc->alc_cdata.alc_rxprev_tail;
1971 sc->alc_cdata.alc_rxtail->m_len -=
1972 (ETHER_CRC_LEN - mp->m_len);
1973 sc->alc_cdata.alc_rxtail->m_next = NULL;
1974 m_freem(mp);
1975 } else {
1976 mp->m_len -= ETHER_CRC_LEN;
1977 }
1978 } else
1979 m->m_len = m->m_pkthdr.len;
1980 m->m_pkthdr.rcvif = ifp;
1981 			/*
1982 			 * Due to hardware bugs, Rx checksum offloading
1983 			 * was intentionally disabled.
1984 			 */
1985 #if NVLAN > 0
1986 			if (status & RRD_VLAN_TAG) {
1987 				u_int32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
1988 				VLAN_INPUT_TAG(ifp, m, ntohs(vtag), );
1989 			}
1990 #endif
1991 
1992 			bpf_mtap(ifp, m);
1993 
1994 			/* Pass it on. */
1995 			ether_input(ifp, m);
1998 }
1999 }
2000 /* Reset mbuf chains. */
2001 ALC_RXCHAIN_RESET(sc);
2002 }
2003
2004 static void
2005 alc_tick(void *xsc)
2006 {
2007 struct alc_softc *sc = xsc;
2008 struct mii_data *mii = &sc->sc_miibus;
2009 int s;
2010
2011 s = splnet();
2012 mii_tick(mii);
2013 alc_stats_update(sc);
2014 splx(s);
2015
2016 callout_schedule(&sc->sc_tick_ch, hz);
2017 }
2018
2019 static void
2020 alc_reset(struct alc_softc *sc)
2021 {
2022 uint32_t reg;
2023 int i;
2024
2025 reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2026 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2027 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
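	/*
	 * MASTER_RESET is assumed to be self-clearing; poll for it to
	 * deassert, giving up after ALC_RESET_TIMEOUT polls of 10us
	 * each.
	 */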
2028 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2029 DELAY(10);
2030 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2031 break;
2032 }
2033 if (i == 0)
2034 printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
2035
2036 for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2037 if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2038 break;
2039 DELAY(10);
2040 }
2041
2042 if (i == 0)
2043 		printf("%s: reset timeout (0x%08x)!\n",
2044 		    device_xname(sc->sc_dev), reg);
2045 }
2046
2047 static int
2048 alc_init(struct ifnet *ifp)
2049 {
2050 struct alc_softc *sc = ifp->if_softc;
2051 struct mii_data *mii;
2052 uint8_t eaddr[ETHER_ADDR_LEN];
2053 bus_addr_t paddr;
2054 uint32_t reg, rxf_hi, rxf_lo;
2055 int error;
2056
2057 /*
2058 * Cancel any pending I/O.
2059 */
2060 alc_stop(ifp, 0);
2061 /*
2062 * Reset the chip to a known state.
2063 */
2064 alc_reset(sc);
2065
2066 /* Initialize Rx descriptors. */
2067 error = alc_init_rx_ring(sc);
2068 if (error != 0) {
2069 printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2070 alc_stop(ifp, 0);
2071 return (error);
2072 }
2073 alc_init_rr_ring(sc);
2074 alc_init_tx_ring(sc);
2075 alc_init_cmb(sc);
2076 alc_init_smb(sc);
2077
2078 /* Enable all clocks. */
2079 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2080
2081 /* Reprogram the station address. */
2082 memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2083 CSR_WRITE_4(sc, ALC_PAR0,
2084 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2085 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
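	/*
	 * As programmed above, ALC_PAR0 holds the low four bytes of
	 * the station address (eaddr[2]..eaddr[5]) and ALC_PAR1 the
	 * high two (eaddr[0], eaddr[1]).
	 */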
2086 	/*
2087 	 * Clear the WOL status and disable all WOL features, as WOL
2088 	 * would interfere with Rx operation under normal conditions.
2089 	 */
2090 CSR_READ_4(sc, ALC_WOL_CFG);
2091 CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2092 /* Set Tx descriptor base addresses. */
2093 paddr = sc->alc_rdata.alc_tx_ring_paddr;
2094 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2095 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2096 /* We don't use high priority ring. */
2097 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2098 /* Set Tx descriptor counter. */
2099 CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2100 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2101 /* Set Rx descriptor base addresses. */
2102 paddr = sc->alc_rdata.alc_rx_ring_paddr;
2103 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2104 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2105 /* We use one Rx ring. */
2106 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2107 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2108 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2109 /* Set Rx descriptor counter. */
2110 CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2111 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2112
2113 	/*
2114 	 * Let the hardware split jumbo frames into alc_max_buf_size
2115 	 * chunks if they do not fit in a single buffer. The Rx return
2116 	 * descriptor holds a counter that indicates how many fragments
2117 	 * were made by the hardware. The buffer size should be a
2118 	 * multiple of 8 bytes. Since the hardware limits the buffer
2119 	 * size, always use the maximum value.
2120 	 * For strict-alignment architectures, make sure to reduce the
2121 	 * buffer size by 8 bytes to make room for the alignment fixup.
2122 	 */
2123 sc->alc_buf_size = RX_BUF_SIZE_MAX;
2124 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2125
2126 paddr = sc->alc_rdata.alc_rr_ring_paddr;
2127 /* Set Rx return descriptor base addresses. */
2128 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2129 /* We use one Rx return ring. */
2130 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2131 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2132 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2133 /* Set Rx return descriptor counter. */
2134 CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2135 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2136 paddr = sc->alc_rdata.alc_cmb_paddr;
2137 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2138 paddr = sc->alc_rdata.alc_smb_paddr;
2139 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2140 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2141
2142 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2143 /* Reconfigure SRAM - Vendor magic. */
2144 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2145 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2146 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2147 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2148 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2149 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2150 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2151 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2152 }
2153
2154 /* Tell hardware that we're ready to load DMA blocks. */
2155 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2156
2157 /* Configure interrupt moderation timer. */
2158 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2159 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
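	/*
	 * ALC_USECS() is assumed to convert microseconds into the
	 * chip's interrupt moderation timer units before the Rx and Tx
	 * values are packed into ALC_IM_TIMER below.
	 */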
2160 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2161 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2162 CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2163 	/*
2164 	 * We don't want automatic interrupt clearing, as the task
2165 	 * queue for the interrupt needs to see the interrupt status.
2166 	 */
2167 reg = MASTER_SA_TIMER_ENB;
2168 if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2169 reg |= MASTER_IM_RX_TIMER_ENB;
2170 if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2171 reg |= MASTER_IM_TX_TIMER_ENB;
2172 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2173 /*
2174 * Disable interrupt re-trigger timer. We don't want automatic
2175 * re-triggering of un-ACKed interrupts.
2176 */
2177 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2178 /* Configure CMB. */
2179 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2180 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2181 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2182 else
2183 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2184 	/*
2185 	 * The hardware can be configured to raise an SMB interrupt at
2186 	 * a programmed interval. Since the driver already has a
2187 	 * callout that runs every hz, use that instead of relying on
2188 	 * the periodic SMB interrupt.
2189 	 */
2190 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2191 /* Clear MAC statistics. */
2192 alc_stats_clear(sc);
2193
2194 	/*
2195 	 * Always use the maximum frame size the controller can support.
2196 	 * Otherwise, received frames that are larger than the alc(4)
2197 	 * MTU would be silently dropped in hardware, which makes
2198 	 * path-MTU discovery hard, as the sender wouldn't get any
2199 	 * responses from the receiver. alc(4) supports multi-fragment
2200 	 * frames on the Rx path, so it has no problem reassembling
2201 	 * fragmented frames. Using the maximum frame size also removes
2202 	 * the need to reinitialize the hardware when the interface MTU
2203 	 * is changed.
2204 	 *
2205 	 * Be conservative in what you do, be liberal in what you
2206 	 * accept from others - RFC 793.
2207 	 */
2208 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2209
2210 /* Disable header split(?) */
2211 CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2212
2213 /* Configure IPG/IFG parameters. */
2214 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2215 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
2216 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2217 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2218 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
2219 /* Set parameters for half-duplex media. */
2220 CSR_WRITE_4(sc, ALC_HDPX_CFG,
2221 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2222 HDPX_CFG_LCOL_MASK) |
2223 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2224 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2225 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2226 HDPX_CFG_ABEBT_MASK) |
2227 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2228 HDPX_CFG_JAMIPG_MASK));
2229 	/*
2230 	 * Set the TSO/checksum offload threshold. For frames that are
2231 	 * larger than this threshold, the hardware won't do TSO or
2232 	 * checksum offloading.
2233 	 */
2234 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
2235 (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2236 TSO_OFFLOAD_THRESH_MASK);
2237 /* Configure TxQ. */
2238 reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2239 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
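	/*
	 * The right-shift below appears to halve the Tx FIFO burst on
	 * the AR8152 parts; presumably a vendor-recommended setting
	 * for these Fast Ethernet controllers.
	 */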
2240 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2241 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2242 reg >>= 1;
2243 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2244 TXQ_CFG_TD_BURST_MASK;
2245 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2246
2247 /* Configure Rx free descriptor pre-fetching. */
2248 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2249 ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
2250 RX_RD_FREE_THRESH_HI_MASK) |
2251 ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
2252 RX_RD_FREE_THRESH_LO_MASK));
2253
2254 /*
2255 * Configure flow control parameters.
2256 * XON : 80% of Rx FIFO
2257 * XOFF : 30% of Rx FIFO
2258 */
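	/*
	 * The thresholds below are plain fractions of whatever unit
	 * ALC_SRAM_RX_FIFO_LEN reports: rxf_hi = 8/10 and rxf_lo =
	 * 3/10 of the Rx FIFO length, matching the 80%/30% figures
	 * above.
	 */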
2259 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
2260 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) {
2261 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2262 rxf_hi = (reg * 8) / 10;
2263 rxf_lo = (reg * 3) / 10;
2264 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2265 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2266 RX_FIFO_PAUSE_THRESH_LO_MASK) |
2267 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2268 RX_FIFO_PAUSE_THRESH_HI_MASK));
2269 }
2270
2271 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2272 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2273 CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2274 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2275 SERDES_PHY_CLK_SLOWDOWN);
2276
2277 /* Disable RSS until I understand L1C/L2C's RSS logic. */
2278 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2279 CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2280
2281 /* Configure RxQ. */
2282 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2283 RXQ_CFG_RD_BURST_MASK;
2284 reg |= RXQ_CFG_RSS_MODE_DIS;
2285 if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
2286 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
2287 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2288
2289 /* Configure DMA parameters. */
2290 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2291 reg |= sc->alc_rcb;
2292 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2293 reg |= DMA_CFG_CMB_ENB;
2294 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2295 reg |= DMA_CFG_SMB_ENB;
2296 else
2297 reg |= DMA_CFG_SMB_DIS;
2298 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2299 DMA_CFG_RD_BURST_SHIFT;
2300 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2301 DMA_CFG_WR_BURST_SHIFT;
2302 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2303 DMA_CFG_RD_DELAY_CNT_MASK;
2304 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2305 DMA_CFG_WR_DELAY_CNT_MASK;
2306 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2307
2308 	/*
2309 	 * Configure Tx/Rx MACs.
2310 	 *  - Auto-padding for short frames.
2311 	 *  - Enable CRC generation.
2312 	 * The actual reconfiguration of the MAC for the resolved
2313 	 * speed/duplex follows once link establishment is detected.
2314 	 * AR813x/AR815x always does checksum computation regardless
2315 	 * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known
2316 	 * to have a bug in the protocol field of the Rx return
2317 	 * structure, so these controllers can't handle fragmented
2318 	 * frames. Disable Rx checksum offloading until there is a
2319 	 * newer controller with a sane implementation.
2320 	 */
2321 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
2322 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2323 MAC_CFG_PREAMBLE_MASK);
2324 if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
2325 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
2326 sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2327 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2328 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
2329 reg |= MAC_CFG_SPEED_10_100;
2330 else
2331 reg |= MAC_CFG_SPEED_1000;
2332 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2333
2334 /* Set up the receive filter. */
2335 alc_iff(sc);
2336 alc_rxvlan(sc);
2337
2338 /* Acknowledge all pending interrupts and clear it. */
2339 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
2340 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2341 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
2342
2343 sc->alc_flags &= ~ALC_FLAG_LINK;
2344 /* Switch to the current media. */
2345 mii = &sc->sc_miibus;
2346 mii_mediachg(mii);
2347
2348 callout_schedule(&sc->sc_tick_ch, hz);
2349
2350 ifp->if_flags |= IFF_RUNNING;
2351 ifp->if_flags &= ~IFF_OACTIVE;
2352
2353 return (0);
2354 }
2355
2356 static void
2357 alc_stop(struct ifnet *ifp, int disable)
2358 {
2359 struct alc_softc *sc = ifp->if_softc;
2360 struct alc_txdesc *txd;
2361 struct alc_rxdesc *rxd;
2362 uint32_t reg;
2363 int i;
2364
2365 callout_stop(&sc->sc_tick_ch);
2366
2367 /*
2368 * Mark the interface down and cancel the watchdog timer.
2369 */
2370 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2371 ifp->if_timer = 0;
2372
2373 sc->alc_flags &= ~ALC_FLAG_LINK;
2374
2375 alc_stats_update(sc);
2376
2377 mii_down(&sc->sc_miibus);
2378
2379 /* Disable interrupts. */
2380 CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
2381 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2382 alc_stop_queue(sc);
2383
2384 /* Disable DMA. */
2385 reg = CSR_READ_4(sc, ALC_DMA_CFG);
2386 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
2387 reg |= DMA_CFG_SMB_DIS;
2388 CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2389 DELAY(1000);
2390
2391 /* Stop Rx/Tx MACs. */
2392 alc_stop_mac(sc);
2393
2394 /* Disable interrupts which might be touched in taskq handler. */
2395 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2396
2397 /* Reclaim Rx buffers that have been processed. */
2398 if (sc->alc_cdata.alc_rxhead != NULL)
2399 m_freem(sc->alc_cdata.alc_rxhead);
2400 ALC_RXCHAIN_RESET(sc);
2401 /*
2402 * Free Tx/Rx mbufs still in the queues.
2403 */
2404 for (i = 0; i < ALC_RX_RING_CNT; i++) {
2405 rxd = &sc->alc_cdata.alc_rxdesc[i];
2406 if (rxd->rx_m != NULL) {
2407 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2408 m_freem(rxd->rx_m);
2409 rxd->rx_m = NULL;
2410 }
2411 }
2412 for (i = 0; i < ALC_TX_RING_CNT; i++) {
2413 txd = &sc->alc_cdata.alc_txdesc[i];
2414 if (txd->tx_m != NULL) {
2415 bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2416 m_freem(txd->tx_m);
2417 txd->tx_m = NULL;
2418 }
2419 }
2420 }
2421
2422 static void
2423 alc_stop_mac(struct alc_softc *sc)
2424 {
2425 uint32_t reg;
2426 int i;
2427
2428 /* Disable Rx/Tx MAC. */
2429 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2430 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2431 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2432 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2433 }
2434 for (i = ALC_TIMEOUT; i > 0; i--) {
2435 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2436 if (reg == 0)
2437 break;
2438 DELAY(10);
2439 }
2440 if (i == 0)
2441 		printf("%s: could not disable Rx/Tx MAC (0x%08x)!\n",
2442 		    device_xname(sc->sc_dev), reg);
2443 }
2444
2445 static void
2446 alc_start_queue(struct alc_softc *sc)
2447 {
2448 uint32_t qcfg[] = {
2449 0,
2450 RXQ_CFG_QUEUE0_ENB,
2451 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
2452 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
2453 RXQ_CFG_ENB
2454 };
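	/*
	 * Only qcfg[1] is used below since the driver runs a single Rx
	 * queue; the table spells out the enable bits for one through
	 * four queues for reference.
	 */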
2455 uint32_t cfg;
2456
2457 /* Enable RxQ. */
2458 cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
2459 cfg &= ~RXQ_CFG_ENB;
2460 cfg |= qcfg[1];
2461 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
2462 /* Enable TxQ. */
2463 cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
2464 cfg |= TXQ_CFG_ENB;
2465 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
2466 }
2467
2468 static void
2469 alc_stop_queue(struct alc_softc *sc)
2470 {
2471 uint32_t reg;
2472 int i;
2473
2474 /* Disable RxQ. */
2475 reg = CSR_READ_4(sc, ALC_RXQ_CFG);
2476 if ((reg & RXQ_CFG_ENB) != 0) {
2477 reg &= ~RXQ_CFG_ENB;
2478 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2479 }
2480 /* Disable TxQ. */
2481 reg = CSR_READ_4(sc, ALC_TXQ_CFG);
2482 if ((reg & TXQ_CFG_ENB) != 0) {
2483 reg &= ~TXQ_CFG_ENB;
2484 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
2485 }
2486 for (i = ALC_TIMEOUT; i > 0; i--) {
2487 reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2488 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2489 break;
2490 DELAY(10);
2491 }
2492 if (i == 0)
2493 printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
2494 device_xname(sc->sc_dev), reg);
2495 }
2496
2497 static void
2498 alc_init_tx_ring(struct alc_softc *sc)
2499 {
2500 struct alc_ring_data *rd;
2501 struct alc_txdesc *txd;
2502 int i;
2503
2504 sc->alc_cdata.alc_tx_prod = 0;
2505 sc->alc_cdata.alc_tx_cons = 0;
2506 sc->alc_cdata.alc_tx_cnt = 0;
2507
2508 rd = &sc->alc_rdata;
2509 memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
2510 for (i = 0; i < ALC_TX_RING_CNT; i++) {
2511 txd = &sc->alc_cdata.alc_txdesc[i];
2512 txd->tx_m = NULL;
2513 }
2514
2515 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2516 sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2517 }
2518
2519 static int
2520 alc_init_rx_ring(struct alc_softc *sc)
2521 {
2522 struct alc_ring_data *rd;
2523 struct alc_rxdesc *rxd;
2524 int i;
2525
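	/*
	 * The producer index handed to the hardware is assumed to
	 * point at the last valid descriptor, so the Rx consumer
	 * starts at ALC_RX_RING_CNT - 1; alc_rxintr() keeps advancing
	 * it modulo the ring size as buffers are refilled.
	 */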
2526 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
2527 rd = &sc->alc_rdata;
2528 memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
2529 for (i = 0; i < ALC_RX_RING_CNT; i++) {
2530 rxd = &sc->alc_cdata.alc_rxdesc[i];
2531 rxd->rx_m = NULL;
2532 rxd->rx_desc = &rd->alc_rx_ring[i];
2533 if (alc_newbuf(sc, rxd, 1) != 0)
2534 return (ENOBUFS);
2535 }
2536
2537 	/*
2538 	 * Since the controller does not update Rx descriptors, the
2539 	 * driver does not have to read them back, so
2540 	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
2541 	 */
2542 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2543 sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2544 /* Let controller know availability of new Rx buffers. */
2545 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
2546
2547 return (0);
2548 }
2549
2550 static void
2551 alc_init_rr_ring(struct alc_softc *sc)
2552 {
2553 struct alc_ring_data *rd;
2554
2555 sc->alc_cdata.alc_rr_cons = 0;
2556 ALC_RXCHAIN_RESET(sc);
2557
2558 rd = &sc->alc_rdata;
2559 memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
2560 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2561 sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2562 }
2563
2564 static void
2565 alc_init_cmb(struct alc_softc *sc)
2566 {
2567 struct alc_ring_data *rd;
2568
2569 rd = &sc->alc_rdata;
2570 memset(rd->alc_cmb, 0, ALC_CMB_SZ);
2571 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2572 sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2573 }
2574
2575 static void
2576 alc_init_smb(struct alc_softc *sc)
2577 {
2578 struct alc_ring_data *rd;
2579
2580 rd = &sc->alc_rdata;
2581 memset(rd->alc_smb, 0, ALC_SMB_SZ);
2582 bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2583 sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2584 }
2585
2586 static void
2587 alc_rxvlan(struct alc_softc *sc)
2588 {
2589 uint32_t reg;
2590
2591 reg = CSR_READ_4(sc, ALC_MAC_CFG);
2592 if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2593 reg |= MAC_CFG_VLAN_TAG_STRIP;
2594 else
2595 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2596 CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2597 }
2598
2599 static void
2600 alc_iff(struct alc_softc *sc)
2601 {
2602 struct ethercom *ec = &sc->sc_ec;
2603 struct ifnet *ifp = &ec->ec_if;
2604 struct ether_multi *enm;
2605 struct ether_multistep step;
2606 uint32_t crc;
2607 uint32_t mchash[2];
2608 uint32_t rxcfg;
2609
2610 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2611 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2612 ifp->if_flags &= ~IFF_ALLMULTI;
2613
2614 /*
2615 * Always accept broadcast frames.
2616 */
2617 rxcfg |= MAC_CFG_BCAST;
2618
2619 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2620 ifp->if_flags |= IFF_ALLMULTI;
2621 if (ifp->if_flags & IFF_PROMISC)
2622 rxcfg |= MAC_CFG_PROMISC;
2623 else
2624 rxcfg |= MAC_CFG_ALLMULTI;
2625 mchash[0] = mchash[1] = 0xFFFFFFFF;
2626 } else {
2627 /* Program new filter. */
2628 memset(mchash, 0, sizeof(mchash));
2629
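		/*
		 * Each address is folded into a 64-bit hash below: the
		 * top bit of the big-endian CRC selects MAR0 or MAR1
		 * and bits 30-26 select the bit within that word,
		 * i.e. bit = (crc >> 26) & 0x1f.
		 */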
2630 ETHER_FIRST_MULTI(step, ec, enm);
2631 while (enm != NULL) {
2632 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2633 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2634 ETHER_NEXT_MULTI(step, enm);
2635 }
2636 }
2637
2638 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2639 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2640 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2641 }
2642
2643 MODULE(MODULE_CLASS_DRIVER, if_alc, "pci");
2644
2645 #ifdef _MODULE
2646 #include "ioconf.c"
2647 #endif
2648
2649 static int
2650 if_alc_modcmd(modcmd_t cmd, void *opaque)
2651 {
2652 int error = 0;
2653
2654 switch (cmd) {
2655 case MODULE_CMD_INIT:
2656 #ifdef _MODULE
2657 error = config_init_component(cfdriver_ioconf_if_alc,
2658 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2659 #endif
2660 return error;
2661 case MODULE_CMD_FINI:
2662 #ifdef _MODULE
2663 error = config_fini_component(cfdriver_ioconf_if_alc,
2664 cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2665 #endif
2666 return error;
2667 default:
2668 return ENOTTY;
2669 }
2670 }
2671