if_alc.c revision 1.1 1 1.1 jmcneill /* $OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $ */
2 1.1 jmcneill /*-
3 1.1 jmcneill * Copyright (c) 2009, Pyun YongHyeon <yongari (at) FreeBSD.org>
4 1.1 jmcneill * All rights reserved.
5 1.1 jmcneill *
6 1.1 jmcneill * Redistribution and use in source and binary forms, with or without
7 1.1 jmcneill * modification, are permitted provided that the following conditions
8 1.1 jmcneill * are met:
9 1.1 jmcneill * 1. Redistributions of source code must retain the above copyright
10 1.1 jmcneill * notice unmodified, this list of conditions, and the following
11 1.1 jmcneill * disclaimer.
12 1.1 jmcneill * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 jmcneill * notice, this list of conditions and the following disclaimer in the
14 1.1 jmcneill * documentation and/or other materials provided with the distribution.
15 1.1 jmcneill *
16 1.1 jmcneill * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.1 jmcneill * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.1 jmcneill * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.1 jmcneill * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.1 jmcneill * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.1 jmcneill * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.1 jmcneill * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.1 jmcneill * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.1 jmcneill * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.1 jmcneill * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.1 jmcneill * SUCH DAMAGE.
27 1.1 jmcneill */
28 1.1 jmcneill
29 1.1 jmcneill /* Driver for Atheros AR8131/AR8132 PCIe Ethernet. */
30 1.1 jmcneill
31 1.1 jmcneill #ifdef _KERNEL_OPT
32 1.1 jmcneill #include "vlan.h"
33 1.1 jmcneill #endif
34 1.1 jmcneill
35 1.1 jmcneill #include <sys/param.h>
36 1.1 jmcneill #include <sys/proc.h>
37 1.1 jmcneill #include <sys/endian.h>
38 1.1 jmcneill #include <sys/systm.h>
39 1.1 jmcneill #include <sys/types.h>
40 1.1 jmcneill #include <sys/sockio.h>
41 1.1 jmcneill #include <sys/mbuf.h>
42 1.1 jmcneill #include <sys/queue.h>
43 1.1 jmcneill #include <sys/kernel.h>
44 1.1 jmcneill #include <sys/device.h>
45 1.1 jmcneill #include <sys/callout.h>
46 1.1 jmcneill #include <sys/socket.h>
47 1.1 jmcneill #include <sys/module.h>
48 1.1 jmcneill
49 1.1 jmcneill #include <sys/bus.h>
50 1.1 jmcneill
51 1.1 jmcneill #include <net/if.h>
52 1.1 jmcneill #include <net/if_dl.h>
53 1.1 jmcneill #include <net/if_llc.h>
54 1.1 jmcneill #include <net/if_media.h>
55 1.1 jmcneill #include <net/if_ether.h>
56 1.1 jmcneill
57 1.1 jmcneill #include <net/bpf.h>
58 1.1 jmcneill
59 1.1 jmcneill #ifdef INET
60 1.1 jmcneill #include <netinet/in.h>
61 1.1 jmcneill #include <netinet/in_systm.h>
62 1.1 jmcneill #include <netinet/in_var.h>
63 1.1 jmcneill #include <netinet/ip.h>
64 1.1 jmcneill #endif
65 1.1 jmcneill
66 1.1 jmcneill #include <net/if_types.h>
67 1.1 jmcneill #include <net/if_vlanvar.h>
68 1.1 jmcneill
69 1.1 jmcneill #include <net/bpf.h>
70 1.1 jmcneill
71 1.1 jmcneill #include <sys/rnd.h>
72 1.1 jmcneill
73 1.1 jmcneill #include <dev/mii/mii.h>
74 1.1 jmcneill #include <dev/mii/miivar.h>
75 1.1 jmcneill
76 1.1 jmcneill #include <dev/pci/pcireg.h>
77 1.1 jmcneill #include <dev/pci/pcivar.h>
78 1.1 jmcneill #include <dev/pci/pcidevs.h>
79 1.1 jmcneill
80 1.1 jmcneill #include <dev/pci/if_alcreg.h>
81 1.1 jmcneill
82 1.1 jmcneill static int alc_match(device_t, cfdata_t, void *);
83 1.1 jmcneill static void alc_attach(device_t, device_t, void *);
84 1.1 jmcneill static int alc_detach(device_t, int);
85 1.1 jmcneill
86 1.1 jmcneill static int alc_init(struct ifnet *);
87 1.1 jmcneill static void alc_start(struct ifnet *);
88 1.1 jmcneill static int alc_ioctl(struct ifnet *, u_long, void *);
89 1.1 jmcneill static void alc_watchdog(struct ifnet *);
90 1.1 jmcneill static int alc_mediachange(struct ifnet *);
91 1.1 jmcneill static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
92 1.1 jmcneill
93 1.1 jmcneill static void alc_aspm(struct alc_softc *);
94 1.1 jmcneill static void alc_disable_l0s_l1(struct alc_softc *);
95 1.1 jmcneill static int alc_dma_alloc(struct alc_softc *);
96 1.1 jmcneill static void alc_dma_free(struct alc_softc *);
97 1.1 jmcneill static int alc_encap(struct alc_softc *, struct mbuf **);
98 1.1 jmcneill static void alc_get_macaddr(struct alc_softc *);
99 1.1 jmcneill static void alc_init_cmb(struct alc_softc *);
100 1.1 jmcneill static void alc_init_rr_ring(struct alc_softc *);
101 1.1 jmcneill static int alc_init_rx_ring(struct alc_softc *);
102 1.1 jmcneill static void alc_init_smb(struct alc_softc *);
103 1.1 jmcneill static void alc_init_tx_ring(struct alc_softc *);
104 1.1 jmcneill static int alc_intr(void *);
105 1.1 jmcneill static void alc_mac_config(struct alc_softc *);
106 1.1 jmcneill static int alc_miibus_readreg(device_t, int, int);
107 1.1 jmcneill static void alc_miibus_statchg(device_t);
108 1.1 jmcneill static void alc_miibus_writereg(device_t, int, int, int);
109 1.1 jmcneill static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *, int);
110 1.1 jmcneill static void alc_phy_down(struct alc_softc *);
111 1.1 jmcneill static void alc_phy_reset(struct alc_softc *);
112 1.1 jmcneill static void alc_reset(struct alc_softc *);
113 1.1 jmcneill static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
114 1.1 jmcneill static int alc_rxintr(struct alc_softc *);
115 1.1 jmcneill static void alc_iff(struct alc_softc *);
116 1.1 jmcneill static void alc_rxvlan(struct alc_softc *);
117 1.1 jmcneill static void alc_start_queue(struct alc_softc *);
118 1.1 jmcneill static void alc_stats_clear(struct alc_softc *);
119 1.1 jmcneill static void alc_stats_update(struct alc_softc *);
120 1.1 jmcneill static void alc_stop(struct ifnet *, int);
121 1.1 jmcneill static void alc_stop_mac(struct alc_softc *);
122 1.1 jmcneill static void alc_stop_queue(struct alc_softc *);
123 1.1 jmcneill static void alc_tick(void *);
124 1.1 jmcneill static void alc_txeof(struct alc_softc *);
125 1.1 jmcneill
126 1.1 jmcneill uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
127 1.1 jmcneill
128 1.1 jmcneill CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
129 1.1 jmcneill alc_match, alc_attach, alc_detach, NULL);
130 1.1 jmcneill
131 1.1 jmcneill int alcdebug = 0;
132 1.1 jmcneill #define DPRINTF(x) do { if (alcdebug) printf x; } while (0)
133 1.1 jmcneill
134 1.1 jmcneill #define ETHER_ALIGN 2
135 1.1 jmcneill #define ALC_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)
136 1.1 jmcneill
137 1.1 jmcneill static int
138 1.1 jmcneill alc_miibus_readreg(device_t dev, int phy, int reg)
139 1.1 jmcneill {
140 1.1 jmcneill struct alc_softc *sc = device_private(dev);
141 1.1 jmcneill uint32_t v;
142 1.1 jmcneill int i;
143 1.1 jmcneill
144 1.1 jmcneill if (phy != sc->alc_phyaddr)
145 1.1 jmcneill return (0);
146 1.1 jmcneill
147 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
148 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
149 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
150 1.1 jmcneill DELAY(5);
151 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
152 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
153 1.1 jmcneill break;
154 1.1 jmcneill }
155 1.1 jmcneill
156 1.1 jmcneill if (i == 0) {
157 1.1 jmcneill printf("%s: phy read timeout: phy %d, reg %d\n",
158 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
159 1.1 jmcneill return (0);
160 1.1 jmcneill }
161 1.1 jmcneill
162 1.1 jmcneill return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
163 1.1 jmcneill }
164 1.1 jmcneill
165 1.1 jmcneill static void
166 1.1 jmcneill alc_miibus_writereg(device_t dev, int phy, int reg, int val)
167 1.1 jmcneill {
168 1.1 jmcneill struct alc_softc *sc = device_private(dev);
169 1.1 jmcneill uint32_t v;
170 1.1 jmcneill int i;
171 1.1 jmcneill
172 1.1 jmcneill if (phy != sc->alc_phyaddr)
173 1.1 jmcneill return;
174 1.1 jmcneill
175 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
176 1.1 jmcneill (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
177 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
178 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
179 1.1 jmcneill DELAY(5);
180 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
181 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
182 1.1 jmcneill break;
183 1.1 jmcneill }
184 1.1 jmcneill
185 1.1 jmcneill if (i == 0)
186 1.1 jmcneill printf("%s: phy write timeout: phy %d, reg %d\n",
187 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
188 1.1 jmcneill }
189 1.1 jmcneill
190 1.1 jmcneill static void
191 1.1 jmcneill alc_miibus_statchg(device_t dev)
192 1.1 jmcneill {
193 1.1 jmcneill struct alc_softc *sc = device_private(dev);
194 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
195 1.1 jmcneill struct mii_data *mii;
196 1.1 jmcneill uint32_t reg;
197 1.1 jmcneill
198 1.1 jmcneill if ((ifp->if_flags & IFF_RUNNING) == 0)
199 1.1 jmcneill return;
200 1.1 jmcneill
201 1.1 jmcneill mii = &sc->sc_miibus;
202 1.1 jmcneill
203 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
204 1.1 jmcneill if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
205 1.1 jmcneill (IFM_ACTIVE | IFM_AVALID)) {
206 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
207 1.1 jmcneill case IFM_10_T:
208 1.1 jmcneill case IFM_100_TX:
209 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
210 1.1 jmcneill break;
211 1.1 jmcneill case IFM_1000_T:
212 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
213 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
214 1.1 jmcneill break;
215 1.1 jmcneill default:
216 1.1 jmcneill break;
217 1.1 jmcneill }
218 1.1 jmcneill }
219 1.1 jmcneill alc_stop_queue(sc);
220 1.1 jmcneill /* Stop Rx/Tx MACs. */
221 1.1 jmcneill alc_stop_mac(sc);
222 1.1 jmcneill
223 1.1 jmcneill /* Program MACs with resolved speed/duplex/flow-control. */
224 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
225 1.1 jmcneill alc_start_queue(sc);
226 1.1 jmcneill alc_mac_config(sc);
227 1.1 jmcneill /* Re-enable Tx/Rx MACs. */
228 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
229 1.1 jmcneill reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
230 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
231 1.1 jmcneill }
232 1.1 jmcneill alc_aspm(sc);
233 1.1 jmcneill }
234 1.1 jmcneill
235 1.1 jmcneill static void
236 1.1 jmcneill alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
237 1.1 jmcneill {
238 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
239 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
240 1.1 jmcneill
241 1.1 jmcneill mii_pollstat(mii);
242 1.1 jmcneill ifmr->ifm_status = mii->mii_media_status;
243 1.1 jmcneill ifmr->ifm_active = mii->mii_media_active;
244 1.1 jmcneill }
245 1.1 jmcneill
246 1.1 jmcneill static int
247 1.1 jmcneill alc_mediachange(struct ifnet *ifp)
248 1.1 jmcneill {
249 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
250 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
251 1.1 jmcneill int error;
252 1.1 jmcneill
253 1.1 jmcneill if (mii->mii_instance != 0) {
254 1.1 jmcneill struct mii_softc *miisc;
255 1.1 jmcneill
256 1.1 jmcneill LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
257 1.1 jmcneill mii_phy_reset(miisc);
258 1.1 jmcneill }
259 1.1 jmcneill error = mii_mediachg(mii);
260 1.1 jmcneill
261 1.1 jmcneill return (error);
262 1.1 jmcneill }
263 1.1 jmcneill
264 1.1 jmcneill static int
265 1.1 jmcneill alc_match(device_t dev, cfdata_t match, void *aux)
266 1.1 jmcneill {
267 1.1 jmcneill struct pci_attach_args *pa = aux;
268 1.1 jmcneill
269 1.1 jmcneill if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_ATTANSIC)
270 1.1 jmcneill return 0;
271 1.1 jmcneill
272 1.1 jmcneill switch (PCI_PRODUCT(pa->pa_id)) {
273 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8131:
274 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
275 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
276 1.1 jmcneill break;
277 1.1 jmcneill default:
278 1.1 jmcneill return 0;
279 1.1 jmcneill }
280 1.1 jmcneill
281 1.1 jmcneill return 1;
282 1.1 jmcneill }
283 1.1 jmcneill
/*
 * Read the station address.  If an EEPROM is present, first ask the
 * TWSI engine to reload the EEPROM configuration (which programs the
 * PAR0/PAR1 registers), then extract the 6-byte address from them.
 */
static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	int i;

	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		if ((opt & OPT_CFG_CLK_ENB) == 0) {
			/* Enable the OPT_CFG clock before the reload. */
			opt |= OPT_CFG_CLK_ENB;
			CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
			/* Read back to flush, then let it settle. */
			CSR_READ_4(sc, ALC_OPT_CFG);
			DELAY(1000);
		}
		/* Trigger the software load and poll up to ~100ms. */
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", device_xname(sc->sc_dev));
	}
	/* Restore the clock-enable bit to its original (disabled) state. */
	if ((opt & OPT_CFG_CLK_ENB) != 0) {
		opt &= ~OPT_CFG_CLK_ENB;
		CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
		CSR_READ_4(sc, ALC_OPT_CFG);
		DELAY(1000);
	}

	/* PAR1 holds the two high bytes, PAR0 the four low bytes. */
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
333 1.1 jmcneill
334 1.1 jmcneill static void
335 1.1 jmcneill alc_disable_l0s_l1(struct alc_softc *sc)
336 1.1 jmcneill {
337 1.1 jmcneill uint32_t pmcfg;
338 1.1 jmcneill
339 1.1 jmcneill /* Another magic from vendor. */
340 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
341 1.1 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
342 1.1 jmcneill PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
343 1.1 jmcneill PM_CFG_SERDES_PD_EX_L1);
344 1.1 jmcneill pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
345 1.1 jmcneill PM_CFG_SERDES_L1_ENB;
346 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
347 1.1 jmcneill }
348 1.1 jmcneill
349 1.1 jmcneill static void
350 1.1 jmcneill alc_phy_reset(struct alc_softc *sc)
351 1.1 jmcneill {
352 1.1 jmcneill uint16_t data;
353 1.1 jmcneill
354 1.1 jmcneill /* Reset magic from Linux. */
355 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
356 1.1 jmcneill GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
357 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
358 1.1 jmcneill DELAY(10 * 1000);
359 1.1 jmcneill
360 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
361 1.1 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
362 1.1 jmcneill GPHY_CFG_SEL_ANA_RESET);
363 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
364 1.1 jmcneill DELAY(10 * 1000);
365 1.1 jmcneill
366 1.1 jmcneill /* Load DSP codes, vendor magic. */
367 1.1 jmcneill data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
368 1.1 jmcneill ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
369 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
370 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG18);
371 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
372 1.1 jmcneill ALC_MII_DBG_DATA, data);
373 1.1 jmcneill
374 1.1 jmcneill data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
375 1.1 jmcneill ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
376 1.1 jmcneill ANA_SERDES_EN_LCKDT;
377 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
378 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG5);
379 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
380 1.1 jmcneill ALC_MII_DBG_DATA, data);
381 1.1 jmcneill
382 1.1 jmcneill data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
383 1.1 jmcneill ANA_LONG_CABLE_TH_100_MASK) |
384 1.1 jmcneill ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
385 1.1 jmcneill ANA_SHORT_CABLE_TH_100_SHIFT) |
386 1.1 jmcneill ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
387 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
388 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG54);
389 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
390 1.1 jmcneill ALC_MII_DBG_DATA, data);
391 1.1 jmcneill
392 1.1 jmcneill data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
393 1.1 jmcneill ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
394 1.1 jmcneill ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
395 1.1 jmcneill ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
396 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
397 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG4);
398 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
399 1.1 jmcneill ALC_MII_DBG_DATA, data);
400 1.1 jmcneill
401 1.1 jmcneill data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
402 1.1 jmcneill ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
403 1.1 jmcneill ANA_OEN_125M;
404 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
405 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG0);
406 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
407 1.1 jmcneill ALC_MII_DBG_DATA, data);
408 1.1 jmcneill DELAY(1000);
409 1.1 jmcneill }
410 1.1 jmcneill
/*
 * Power the PHY down (IDDQ mode plus hardware power-down) while
 * keeping it held in external/analog reset.  Used at detach and
 * when putting the chip to sleep.
 */
static void
alc_phy_down(struct alc_softc *sc)
{

	/* Force PHY down. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG,
	    GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
	    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW);
	/* Give the PHY time to enter the low-power state. */
	DELAY(1000);
}
421 1.1 jmcneill
422 1.1 jmcneill static void
423 1.1 jmcneill alc_aspm(struct alc_softc *sc)
424 1.1 jmcneill {
425 1.1 jmcneill uint32_t pmcfg;
426 1.1 jmcneill
427 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
428 1.1 jmcneill pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
429 1.1 jmcneill pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB;
430 1.1 jmcneill pmcfg |= PM_CFG_SERDES_L1_ENB;
431 1.1 jmcneill pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
432 1.1 jmcneill pmcfg |= PM_CFG_MAC_ASPM_CHK;
433 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
434 1.1 jmcneill pmcfg |= PM_CFG_SERDES_PLL_L1_ENB;
435 1.1 jmcneill pmcfg &= ~PM_CFG_CLK_SWH_L1;
436 1.1 jmcneill pmcfg &= ~PM_CFG_ASPM_L1_ENB;
437 1.1 jmcneill pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
438 1.1 jmcneill } else {
439 1.1 jmcneill pmcfg &= ~PM_CFG_SERDES_PLL_L1_ENB;
440 1.1 jmcneill pmcfg |= PM_CFG_CLK_SWH_L1;
441 1.1 jmcneill pmcfg &= ~PM_CFG_ASPM_L1_ENB;
442 1.1 jmcneill pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
443 1.1 jmcneill }
444 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
445 1.1 jmcneill }
446 1.1 jmcneill
447 1.1 jmcneill static void
448 1.1 jmcneill alc_attach(device_t parent, device_t self, void *aux)
449 1.1 jmcneill {
450 1.1 jmcneill
451 1.1 jmcneill struct alc_softc *sc = device_private(self);
452 1.1 jmcneill struct pci_attach_args *pa = aux;
453 1.1 jmcneill pci_chipset_tag_t pc = pa->pa_pc;
454 1.1 jmcneill pci_intr_handle_t ih;
455 1.1 jmcneill const char *intrstr;
456 1.1 jmcneill struct ifnet *ifp;
457 1.1 jmcneill pcireg_t memtype;
458 1.1 jmcneill const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/l1" };
459 1.1 jmcneill uint16_t burst;
460 1.1 jmcneill int base, mii_flags, state, error = 0;
461 1.1 jmcneill uint32_t cap, ctl, val;
462 1.1 jmcneill
463 1.1 jmcneill aprint_naive("\n");
464 1.1 jmcneill aprint_normal(": Attansic/Atheros L1C/L2C Ethernet\n");
465 1.1 jmcneill
466 1.1 jmcneill sc->sc_dev = self;
467 1.1 jmcneill sc->sc_dmat = pa->pa_dmat;
468 1.1 jmcneill sc->sc_pct = pa->pa_pc;
469 1.1 jmcneill sc->sc_pcitag = pa->pa_tag;
470 1.1 jmcneill
471 1.1 jmcneill /*
472 1.1 jmcneill * Allocate IO memory
473 1.1 jmcneill */
474 1.1 jmcneill memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
475 1.1 jmcneill switch (memtype) {
476 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
477 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
478 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
479 1.1 jmcneill break;
480 1.1 jmcneill default:
481 1.1 jmcneill aprint_error_dev(self, "invalid base address register\n");
482 1.1 jmcneill break;
483 1.1 jmcneill }
484 1.1 jmcneill
485 1.1 jmcneill if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
486 1.1 jmcneill &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
487 1.1 jmcneill aprint_error_dev(self, "could not map mem space\n");
488 1.1 jmcneill return;
489 1.1 jmcneill }
490 1.1 jmcneill
491 1.1 jmcneill if (pci_intr_map(pa, &ih) != 0) {
492 1.1 jmcneill printf(": can't map interrupt\n");
493 1.1 jmcneill goto fail;
494 1.1 jmcneill }
495 1.1 jmcneill
496 1.1 jmcneill /*
497 1.1 jmcneill * Allocate IRQ
498 1.1 jmcneill */
499 1.1 jmcneill intrstr = pci_intr_string(sc->sc_pct, ih);
500 1.1 jmcneill sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc);
501 1.1 jmcneill if (sc->sc_irq_handle == NULL) {
502 1.1 jmcneill printf(": could not establish interrupt");
503 1.1 jmcneill if (intrstr != NULL)
504 1.1 jmcneill printf(" at %s", intrstr);
505 1.1 jmcneill printf("\n");
506 1.1 jmcneill goto fail;
507 1.1 jmcneill }
508 1.1 jmcneill aprint_normal_dev(self, "%s\n", intrstr);
509 1.1 jmcneill
510 1.1 jmcneill /* Set PHY address. */
511 1.1 jmcneill sc->alc_phyaddr = ALC_PHY_ADDR;
512 1.1 jmcneill
513 1.1 jmcneill /* Initialize DMA parameters. */
514 1.1 jmcneill sc->alc_dma_rd_burst = 0;
515 1.1 jmcneill sc->alc_dma_wr_burst = 0;
516 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_64;
517 1.1 jmcneill if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
518 1.1 jmcneill &base, NULL)) {
519 1.1 jmcneill sc->alc_flags |= ALC_FLAG_PCIE;
520 1.1 jmcneill burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
521 1.1 jmcneill base + PCI_PCIE_DCSR) >> 16;
522 1.1 jmcneill sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
523 1.1 jmcneill sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
524 1.1 jmcneill if (alcdebug) {
525 1.1 jmcneill printf("%s: Read request size : %u bytes.\n",
526 1.1 jmcneill device_xname(sc->sc_dev),
527 1.1 jmcneill alc_dma_burst[sc->alc_dma_rd_burst]);
528 1.1 jmcneill printf("%s: TLP payload size : %u bytes.\n",
529 1.1 jmcneill device_xname(sc->sc_dev),
530 1.1 jmcneill alc_dma_burst[sc->alc_dma_wr_burst]);
531 1.1 jmcneill }
532 1.1 jmcneill /* Clear data link and flow-control protocol error. */
533 1.1 jmcneill val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
534 1.1 jmcneill val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
535 1.1 jmcneill CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
536 1.1 jmcneill /* Disable ASPM L0S and L1. */
537 1.1 jmcneill cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
538 1.1 jmcneill base + PCI_PCIE_LCAP) >> 16;
539 1.1 jmcneill if ((cap & 0x00000c00) != 0) {
540 1.1 jmcneill ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
541 1.1 jmcneill base + PCI_PCIE_LCSR) >> 16;
542 1.1 jmcneill if ((ctl & 0x08) != 0)
543 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_128;
544 1.1 jmcneill if (alcdebug)
545 1.1 jmcneill printf("%s: RCB %u bytes\n",
546 1.1 jmcneill device_xname(sc->sc_dev),
547 1.1 jmcneill sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
548 1.1 jmcneill state = ctl & 0x03;
549 1.1 jmcneill if (alcdebug)
550 1.1 jmcneill printf("%s: ASPM %s %s\n",
551 1.1 jmcneill device_xname(sc->sc_dev),
552 1.1 jmcneill aspm_state[state],
553 1.1 jmcneill state == 0 ? "disabled" : "enabled");
554 1.1 jmcneill if (state != 0)
555 1.1 jmcneill alc_disable_l0s_l1(sc);
556 1.1 jmcneill }
557 1.1 jmcneill }
558 1.1 jmcneill
559 1.1 jmcneill /* Reset PHY. */
560 1.1 jmcneill alc_phy_reset(sc);
561 1.1 jmcneill
562 1.1 jmcneill /* Reset the ethernet controller. */
563 1.1 jmcneill alc_reset(sc);
564 1.1 jmcneill
565 1.1 jmcneill /*
566 1.1 jmcneill * One odd thing is AR8132 uses the same PHY hardware(F1
567 1.1 jmcneill * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
568 1.1 jmcneill * the PHY supports 1000Mbps but that's not true. The PHY
569 1.1 jmcneill * used in AR8132 can't establish gigabit link even if it
570 1.1 jmcneill * shows the same PHY model/revision number of AR8131.
571 1.1 jmcneill */
572 1.1 jmcneill switch (PCI_PRODUCT(pa->pa_id)) {
573 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
574 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
575 1.1 jmcneill sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_JUMBO;
576 1.1 jmcneill break;
577 1.1 jmcneill default:
578 1.1 jmcneill sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
579 1.1 jmcneill break;
580 1.1 jmcneill }
581 1.1 jmcneill
582 1.1 jmcneill /*
583 1.1 jmcneill * It seems that AR8131/AR8132 has silicon bug for SMB. In
584 1.1 jmcneill * addition, Atheros said that enabling SMB wouldn't improve
585 1.1 jmcneill * performance. However I think it's bad to access lots of
586 1.1 jmcneill * registers to extract MAC statistics.
587 1.1 jmcneill */
588 1.1 jmcneill sc->alc_flags |= ALC_FLAG_SMB_BUG;
589 1.1 jmcneill /*
590 1.1 jmcneill * Don't use Tx CMB. It is known to have silicon bug.
591 1.1 jmcneill */
592 1.1 jmcneill sc->alc_flags |= ALC_FLAG_CMB_BUG;
593 1.1 jmcneill sc->alc_rev = PCI_REVISION(pa->pa_class);
594 1.1 jmcneill sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
595 1.1 jmcneill MASTER_CHIP_REV_SHIFT;
596 1.1 jmcneill if (alcdebug) {
597 1.1 jmcneill printf("%s: PCI device revision : 0x%04x\n",
598 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_rev);
599 1.1 jmcneill printf("%s: Chip id/revision : 0x%04x\n",
600 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_chip_rev);
601 1.1 jmcneill printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
602 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
603 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
604 1.1 jmcneill }
605 1.1 jmcneill
606 1.1 jmcneill error = alc_dma_alloc(sc);
607 1.1 jmcneill if (error)
608 1.1 jmcneill goto fail;
609 1.1 jmcneill
610 1.1 jmcneill callout_init(&sc->sc_tick_ch, 0);
611 1.1 jmcneill callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);
612 1.1 jmcneill
613 1.1 jmcneill /* Load station address. */
614 1.1 jmcneill alc_get_macaddr(sc);
615 1.1 jmcneill
616 1.1 jmcneill aprint_normal_dev(self, "Ethernet address %s\n",
617 1.1 jmcneill ether_sprintf(sc->alc_eaddr));
618 1.1 jmcneill
619 1.1 jmcneill ifp = &sc->sc_ec.ec_if;
620 1.1 jmcneill ifp->if_softc = sc;
621 1.1 jmcneill ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
622 1.1 jmcneill ifp->if_init = alc_init;
623 1.1 jmcneill ifp->if_ioctl = alc_ioctl;
624 1.1 jmcneill ifp->if_start = alc_start;
625 1.1 jmcneill ifp->if_stop = alc_stop;
626 1.1 jmcneill ifp->if_watchdog = alc_watchdog;
627 1.1 jmcneill ifp->if_baudrate = IF_Gbps(1);
628 1.1 jmcneill IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
629 1.1 jmcneill IFQ_SET_READY(&ifp->if_snd);
630 1.1 jmcneill strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
631 1.1 jmcneill
632 1.1 jmcneill sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
633 1.1 jmcneill
634 1.1 jmcneill #ifdef ALC_CHECKSUM
635 1.1 jmcneill ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
636 1.1 jmcneill IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
637 1.1 jmcneill IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
638 1.1 jmcneill #endif
639 1.1 jmcneill
640 1.1 jmcneill #if NVLAN > 0
641 1.1 jmcneill sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
642 1.1 jmcneill #endif
643 1.1 jmcneill
644 1.1 jmcneill /* Set up MII bus. */
645 1.1 jmcneill sc->sc_miibus.mii_ifp = ifp;
646 1.1 jmcneill sc->sc_miibus.mii_readreg = alc_miibus_readreg;
647 1.1 jmcneill sc->sc_miibus.mii_writereg = alc_miibus_writereg;
648 1.1 jmcneill sc->sc_miibus.mii_statchg = alc_miibus_statchg;
649 1.1 jmcneill
650 1.1 jmcneill sc->sc_ec.ec_mii = &sc->sc_miibus;
651 1.1 jmcneill ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
652 1.1 jmcneill alc_mediastatus);
653 1.1 jmcneill mii_flags = 0;
654 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
655 1.1 jmcneill mii_flags |= MIIF_DOPAUSE;
656 1.1 jmcneill mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
657 1.1 jmcneill MII_OFFSET_ANY, mii_flags);
658 1.1 jmcneill
659 1.1 jmcneill if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
660 1.1 jmcneill printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
661 1.1 jmcneill ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
662 1.1 jmcneill 0, NULL);
663 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
664 1.1 jmcneill } else
665 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
666 1.1 jmcneill
667 1.1 jmcneill if_attach(ifp);
668 1.1 jmcneill ether_ifattach(ifp, sc->alc_eaddr);
669 1.1 jmcneill
670 1.1 jmcneill if (!pmf_device_register(self, NULL, NULL))
671 1.1 jmcneill aprint_error_dev(self, "couldn't establish power handler\n");
672 1.1 jmcneill else
673 1.1 jmcneill pmf_class_network_register(self, ifp);
674 1.1 jmcneill
675 1.1 jmcneill return;
676 1.1 jmcneill fail:
677 1.1 jmcneill alc_dma_free(sc);
678 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
679 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
680 1.1 jmcneill sc->sc_irq_handle = NULL;
681 1.1 jmcneill }
682 1.1 jmcneill if (sc->sc_mem_size) {
683 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
684 1.1 jmcneill sc->sc_mem_size = 0;
685 1.1 jmcneill }
686 1.1 jmcneill }
687 1.1 jmcneill
/*
 * Detach: stop the interface, tear down the MII bus and media,
 * detach the network interface, then release DMA, interrupt and
 * bus-space resources in the reverse of attach order.
 */
static int
alc_detach(device_t self, int flags)
{
	struct alc_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	/* Quiesce the hardware at splnet before unwiring anything. */
	s = splnet();
	alc_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	alc_dma_free(sc);

	/* Power the PHY down, then free interrupt and register mapping. */
	alc_phy_down(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}

	return (0);
}
720 1.1 jmcneill
721 1.1 jmcneill static int
722 1.1 jmcneill alc_dma_alloc(struct alc_softc *sc)
723 1.1 jmcneill {
724 1.1 jmcneill struct alc_txdesc *txd;
725 1.1 jmcneill struct alc_rxdesc *rxd;
726 1.1 jmcneill int nsegs, error, i;
727 1.1 jmcneill
728 1.1 jmcneill /*
729 1.1 jmcneill * Create DMA stuffs for TX ring
730 1.1 jmcneill */
731 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
732 1.1 jmcneill ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
733 1.1 jmcneill if (error) {
734 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
735 1.1 jmcneill return (ENOBUFS);
736 1.1 jmcneill }
737 1.1 jmcneill
738 1.1 jmcneill /* Allocate DMA'able memory for TX ring */
739 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
740 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
741 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
742 1.1 jmcneill if (error) {
743 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Tx ring.\n",
744 1.1 jmcneill device_xname(sc->sc_dev));
745 1.1 jmcneill return error;
746 1.1 jmcneill }
747 1.1 jmcneill
748 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
749 1.1 jmcneill nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
750 1.1 jmcneill BUS_DMA_NOWAIT);
751 1.1 jmcneill if (error)
752 1.1 jmcneill return (ENOBUFS);
753 1.1 jmcneill
754 1.1 jmcneill /* Load the DMA map for Tx ring. */
755 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
756 1.1 jmcneill sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
757 1.1 jmcneill if (error) {
758 1.1 jmcneill printf("%s: could not load DMA'able memory for Tx ring.\n",
759 1.1 jmcneill device_xname(sc->sc_dev));
760 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
761 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
762 1.1 jmcneill return error;
763 1.1 jmcneill }
764 1.1 jmcneill
765 1.1 jmcneill sc->alc_rdata.alc_tx_ring_paddr =
766 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
767 1.1 jmcneill
768 1.1 jmcneill /*
769 1.1 jmcneill * Create DMA stuffs for RX ring
770 1.1 jmcneill */
771 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
772 1.1 jmcneill ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
773 1.1 jmcneill if (error)
774 1.1 jmcneill return (ENOBUFS);
775 1.1 jmcneill
776 1.1 jmcneill /* Allocate DMA'able memory for RX ring */
777 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
778 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
779 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
780 1.1 jmcneill if (error) {
781 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx ring.\n",
782 1.1 jmcneill device_xname(sc->sc_dev));
783 1.1 jmcneill return error;
784 1.1 jmcneill }
785 1.1 jmcneill
786 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
787 1.1 jmcneill nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
788 1.1 jmcneill BUS_DMA_NOWAIT);
789 1.1 jmcneill if (error)
790 1.1 jmcneill return (ENOBUFS);
791 1.1 jmcneill
792 1.1 jmcneill /* Load the DMA map for Rx ring. */
793 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
794 1.1 jmcneill sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
795 1.1 jmcneill if (error) {
796 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx ring.\n",
797 1.1 jmcneill device_xname(sc->sc_dev));
798 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
799 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
800 1.1 jmcneill return error;
801 1.1 jmcneill }
802 1.1 jmcneill
803 1.1 jmcneill sc->alc_rdata.alc_rx_ring_paddr =
804 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
805 1.1 jmcneill
806 1.1 jmcneill /*
807 1.1 jmcneill * Create DMA stuffs for RX return ring
808 1.1 jmcneill */
809 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
810 1.1 jmcneill ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
811 1.1 jmcneill if (error)
812 1.1 jmcneill return (ENOBUFS);
813 1.1 jmcneill
814 1.1 jmcneill /* Allocate DMA'able memory for RX return ring */
815 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
816 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
817 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
818 1.1 jmcneill if (error) {
819 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx "
820 1.1 jmcneill "return ring.\n", device_xname(sc->sc_dev));
821 1.1 jmcneill return error;
822 1.1 jmcneill }
823 1.1 jmcneill
824 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
825 1.1 jmcneill nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
826 1.1 jmcneill BUS_DMA_NOWAIT);
827 1.1 jmcneill if (error)
828 1.1 jmcneill return (ENOBUFS);
829 1.1 jmcneill
830 1.1 jmcneill /* Load the DMA map for Rx return ring. */
831 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
832 1.1 jmcneill sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
833 1.1 jmcneill if (error) {
834 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx return ring."
835 1.1 jmcneill "\n", device_xname(sc->sc_dev));
836 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
837 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
838 1.1 jmcneill return error;
839 1.1 jmcneill }
840 1.1 jmcneill
841 1.1 jmcneill sc->alc_rdata.alc_rr_ring_paddr =
842 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
843 1.1 jmcneill
844 1.1 jmcneill /*
845 1.1 jmcneill * Create DMA stuffs for CMB block
846 1.1 jmcneill */
847 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
848 1.1 jmcneill ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
849 1.1 jmcneill &sc->alc_cdata.alc_cmb_map);
850 1.1 jmcneill if (error)
851 1.1 jmcneill return (ENOBUFS);
852 1.1 jmcneill
853 1.1 jmcneill /* Allocate DMA'able memory for CMB block */
854 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
855 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
856 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
857 1.1 jmcneill if (error) {
858 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
859 1.1 jmcneill "CMB block\n", device_xname(sc->sc_dev));
860 1.1 jmcneill return error;
861 1.1 jmcneill }
862 1.1 jmcneill
863 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
864 1.1 jmcneill nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
865 1.1 jmcneill BUS_DMA_NOWAIT);
866 1.1 jmcneill if (error)
867 1.1 jmcneill return (ENOBUFS);
868 1.1 jmcneill
869 1.1 jmcneill /* Load the DMA map for CMB block. */
870 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
871 1.1 jmcneill sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
872 1.1 jmcneill BUS_DMA_WAITOK);
873 1.1 jmcneill if (error) {
874 1.1 jmcneill printf("%s: could not load DMA'able memory for CMB block\n",
875 1.1 jmcneill device_xname(sc->sc_dev));
876 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
877 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
878 1.1 jmcneill return error;
879 1.1 jmcneill }
880 1.1 jmcneill
881 1.1 jmcneill sc->alc_rdata.alc_cmb_paddr =
882 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
883 1.1 jmcneill
884 1.1 jmcneill /*
885 1.1 jmcneill * Create DMA stuffs for SMB block
886 1.1 jmcneill */
887 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
888 1.1 jmcneill ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
889 1.1 jmcneill &sc->alc_cdata.alc_smb_map);
890 1.1 jmcneill if (error)
891 1.1 jmcneill return (ENOBUFS);
892 1.1 jmcneill
893 1.1 jmcneill /* Allocate DMA'able memory for SMB block */
894 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
895 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
896 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
897 1.1 jmcneill if (error) {
898 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
899 1.1 jmcneill "SMB block\n", device_xname(sc->sc_dev));
900 1.1 jmcneill return error;
901 1.1 jmcneill }
902 1.1 jmcneill
903 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
904 1.1 jmcneill nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
905 1.1 jmcneill BUS_DMA_NOWAIT);
906 1.1 jmcneill if (error)
907 1.1 jmcneill return (ENOBUFS);
908 1.1 jmcneill
909 1.1 jmcneill /* Load the DMA map for SMB block */
910 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
911 1.1 jmcneill sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
912 1.1 jmcneill BUS_DMA_WAITOK);
913 1.1 jmcneill if (error) {
914 1.1 jmcneill printf("%s: could not load DMA'able memory for SMB block\n",
915 1.1 jmcneill device_xname(sc->sc_dev));
916 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
917 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
918 1.1 jmcneill return error;
919 1.1 jmcneill }
920 1.1 jmcneill
921 1.1 jmcneill sc->alc_rdata.alc_smb_paddr =
922 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
923 1.1 jmcneill
924 1.1 jmcneill
925 1.1 jmcneill /* Create DMA maps for Tx buffers. */
926 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
927 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
928 1.1 jmcneill txd->tx_m = NULL;
929 1.1 jmcneill txd->tx_dmamap = NULL;
930 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
931 1.1 jmcneill ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
932 1.1 jmcneill &txd->tx_dmamap);
933 1.1 jmcneill if (error) {
934 1.1 jmcneill printf("%s: could not create Tx dmamap.\n",
935 1.1 jmcneill device_xname(sc->sc_dev));
936 1.1 jmcneill return error;
937 1.1 jmcneill }
938 1.1 jmcneill }
939 1.1 jmcneill
940 1.1 jmcneill /* Create DMA maps for Rx buffers. */
941 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
942 1.1 jmcneill BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
943 1.1 jmcneill if (error) {
944 1.1 jmcneill printf("%s: could not create spare Rx dmamap.\n",
945 1.1 jmcneill device_xname(sc->sc_dev));
946 1.1 jmcneill return error;
947 1.1 jmcneill }
948 1.1 jmcneill
949 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
950 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
951 1.1 jmcneill rxd->rx_m = NULL;
952 1.1 jmcneill rxd->rx_dmamap = NULL;
953 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
954 1.1 jmcneill MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
955 1.1 jmcneill if (error) {
956 1.1 jmcneill printf("%s: could not create Rx dmamap.\n",
957 1.1 jmcneill device_xname(sc->sc_dev));
958 1.1 jmcneill return error;
959 1.1 jmcneill }
960 1.1 jmcneill }
961 1.1 jmcneill
962 1.1 jmcneill return (0);
963 1.1 jmcneill }
964 1.1 jmcneill
965 1.1 jmcneill
966 1.1 jmcneill static void
967 1.1 jmcneill alc_dma_free(struct alc_softc *sc)
968 1.1 jmcneill {
969 1.1 jmcneill struct alc_txdesc *txd;
970 1.1 jmcneill struct alc_rxdesc *rxd;
971 1.1 jmcneill int i;
972 1.1 jmcneill
973 1.1 jmcneill /* Tx buffers */
974 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
975 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
976 1.1 jmcneill if (txd->tx_dmamap != NULL) {
977 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
978 1.1 jmcneill txd->tx_dmamap = NULL;
979 1.1 jmcneill }
980 1.1 jmcneill }
981 1.1 jmcneill /* Rx buffers */
982 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
983 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
984 1.1 jmcneill if (rxd->rx_dmamap != NULL) {
985 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
986 1.1 jmcneill rxd->rx_dmamap = NULL;
987 1.1 jmcneill }
988 1.1 jmcneill }
989 1.1 jmcneill if (sc->alc_cdata.alc_rx_sparemap != NULL) {
990 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
991 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = NULL;
992 1.1 jmcneill }
993 1.1 jmcneill
994 1.1 jmcneill /* Tx ring. */
995 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL)
996 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
997 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL &&
998 1.1 jmcneill sc->alc_rdata.alc_tx_ring != NULL)
999 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1000 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
1001 1.1 jmcneill sc->alc_rdata.alc_tx_ring = NULL;
1002 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
1003 1.1 jmcneill
1004 1.1 jmcneill /* Rx ring. */
1005 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL)
1006 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1007 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1008 1.1 jmcneill sc->alc_rdata.alc_rx_ring != NULL)
1009 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1010 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1011 1.1 jmcneill sc->alc_rdata.alc_rx_ring = NULL;
1012 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map = NULL;
1013 1.1 jmcneill
1014 1.1 jmcneill /* Rx return ring. */
1015 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL)
1016 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1017 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1018 1.1 jmcneill sc->alc_rdata.alc_rr_ring != NULL)
1019 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1020 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1021 1.1 jmcneill sc->alc_rdata.alc_rr_ring = NULL;
1022 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map = NULL;
1023 1.1 jmcneill
1024 1.1 jmcneill /* CMB block */
1025 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL)
1026 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1027 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL &&
1028 1.1 jmcneill sc->alc_rdata.alc_cmb != NULL)
1029 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1030 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1031 1.1 jmcneill sc->alc_rdata.alc_cmb = NULL;
1032 1.1 jmcneill sc->alc_cdata.alc_cmb_map = NULL;
1033 1.1 jmcneill
1034 1.1 jmcneill /* SMB block */
1035 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL)
1036 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1037 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL &&
1038 1.1 jmcneill sc->alc_rdata.alc_smb != NULL)
1039 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1040 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1041 1.1 jmcneill sc->alc_rdata.alc_smb = NULL;
1042 1.1 jmcneill sc->alc_cdata.alc_smb_map = NULL;
1043 1.1 jmcneill }
1044 1.1 jmcneill
1045 1.1 jmcneill static int
1046 1.1 jmcneill alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1047 1.1 jmcneill {
1048 1.1 jmcneill struct alc_txdesc *txd, *txd_last;
1049 1.1 jmcneill struct tx_desc *desc;
1050 1.1 jmcneill struct mbuf *m;
1051 1.1 jmcneill bus_dmamap_t map;
1052 1.1 jmcneill uint32_t cflags, poff, vtag;
1053 1.1 jmcneill int error, idx, nsegs, prod;
1054 1.1 jmcneill #if NVLAN > 0
1055 1.1 jmcneill struct m_tag *mtag;
1056 1.1 jmcneill #endif
1057 1.1 jmcneill
1058 1.1 jmcneill m = *m_head;
1059 1.1 jmcneill cflags = vtag = 0;
1060 1.1 jmcneill poff = 0;
1061 1.1 jmcneill
1062 1.1 jmcneill prod = sc->alc_cdata.alc_tx_prod;
1063 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1064 1.1 jmcneill txd_last = txd;
1065 1.1 jmcneill map = txd->tx_dmamap;
1066 1.1 jmcneill
1067 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1068 1.1 jmcneill
1069 1.1 jmcneill if (error == EFBIG) {
1070 1.1 jmcneill error = 0;
1071 1.1 jmcneill
1072 1.1 jmcneill *m_head = m_pullup(*m_head, MHLEN);
1073 1.1 jmcneill if (*m_head == NULL) {
1074 1.1 jmcneill printf("%s: can't defrag TX mbuf\n",
1075 1.1 jmcneill device_xname(sc->sc_dev));
1076 1.1 jmcneill return ENOBUFS;
1077 1.1 jmcneill }
1078 1.1 jmcneill
1079 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
1080 1.1 jmcneill BUS_DMA_NOWAIT);
1081 1.1 jmcneill
1082 1.1 jmcneill if (error != 0) {
1083 1.1 jmcneill printf("%s: could not load defragged TX mbuf\n",
1084 1.1 jmcneill device_xname(sc->sc_dev));
1085 1.1 jmcneill m_freem(*m_head);
1086 1.1 jmcneill *m_head = NULL;
1087 1.1 jmcneill return error;
1088 1.1 jmcneill }
1089 1.1 jmcneill } else if (error) {
1090 1.1 jmcneill printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1091 1.1 jmcneill return (error);
1092 1.1 jmcneill }
1093 1.1 jmcneill
1094 1.1 jmcneill nsegs = map->dm_nsegs;
1095 1.1 jmcneill
1096 1.1 jmcneill if (nsegs == 0) {
1097 1.1 jmcneill m_freem(*m_head);
1098 1.1 jmcneill *m_head = NULL;
1099 1.1 jmcneill return (EIO);
1100 1.1 jmcneill }
1101 1.1 jmcneill
1102 1.1 jmcneill /* Check descriptor overrun. */
1103 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1104 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, map);
1105 1.1 jmcneill return (ENOBUFS);
1106 1.1 jmcneill }
1107 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1108 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1109 1.1 jmcneill
1110 1.1 jmcneill m = *m_head;
1111 1.1 jmcneill desc = NULL;
1112 1.1 jmcneill idx = 0;
1113 1.1 jmcneill #if NVLAN > 0
1114 1.1 jmcneill /* Configure VLAN hardware tag insertion. */
1115 1.1 jmcneill if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
1116 1.1 jmcneill vtag = htons(VLAN_TAG_VALUE(mtag));
1117 1.1 jmcneill vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1118 1.1 jmcneill cflags |= TD_INS_VLAN_TAG;
1119 1.1 jmcneill }
1120 1.1 jmcneill #endif
1121 1.1 jmcneill /* Configure Tx checksum offload. */
1122 1.1 jmcneill if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1123 1.1 jmcneill cflags |= TD_CUSTOM_CSUM;
1124 1.1 jmcneill /* Set checksum start offset. */
1125 1.1 jmcneill cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1126 1.1 jmcneill TD_PLOAD_OFFSET_MASK;
1127 1.1 jmcneill }
1128 1.1 jmcneill for (; idx < nsegs; idx++) {
1129 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1130 1.1 jmcneill desc->len =
1131 1.1 jmcneill htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1132 1.1 jmcneill desc->flags = htole32(cflags);
1133 1.1 jmcneill desc->addr = htole64(map->dm_segs[idx].ds_addr);
1134 1.1 jmcneill sc->alc_cdata.alc_tx_cnt++;
1135 1.1 jmcneill ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1136 1.1 jmcneill }
1137 1.1 jmcneill /* Update producer index. */
1138 1.1 jmcneill sc->alc_cdata.alc_tx_prod = prod;
1139 1.1 jmcneill
1140 1.1 jmcneill /* Finally set EOP on the last descriptor. */
1141 1.1 jmcneill prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1142 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1143 1.1 jmcneill desc->flags |= htole32(TD_EOP);
1144 1.1 jmcneill
1145 1.1 jmcneill /* Swap dmamap of the first and the last. */
1146 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1147 1.1 jmcneill map = txd_last->tx_dmamap;
1148 1.1 jmcneill txd_last->tx_dmamap = txd->tx_dmamap;
1149 1.1 jmcneill txd->tx_dmamap = map;
1150 1.1 jmcneill txd->tx_m = m;
1151 1.1 jmcneill
1152 1.1 jmcneill return (0);
1153 1.1 jmcneill }
1154 1.1 jmcneill
1155 1.1 jmcneill static void
1156 1.1 jmcneill alc_start(struct ifnet *ifp)
1157 1.1 jmcneill {
1158 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1159 1.1 jmcneill struct mbuf *m_head;
1160 1.1 jmcneill int enq;
1161 1.1 jmcneill
1162 1.1 jmcneill if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1163 1.1 jmcneill return;
1164 1.1 jmcneill
1165 1.1 jmcneill /* Reclaim transmitted frames. */
1166 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
1167 1.1 jmcneill alc_txeof(sc);
1168 1.1 jmcneill
1169 1.1 jmcneill enq = 0;
1170 1.1 jmcneill for (;;) {
1171 1.1 jmcneill IFQ_DEQUEUE(&ifp->if_snd, m_head);
1172 1.1 jmcneill if (m_head == NULL)
1173 1.1 jmcneill break;
1174 1.1 jmcneill
1175 1.1 jmcneill /*
1176 1.1 jmcneill * Pack the data into the transmit ring. If we
1177 1.1 jmcneill * don't have room, set the OACTIVE flag and wait
1178 1.1 jmcneill * for the NIC to drain the ring.
1179 1.1 jmcneill */
1180 1.1 jmcneill if (alc_encap(sc, &m_head)) {
1181 1.1 jmcneill if (m_head == NULL)
1182 1.1 jmcneill break;
1183 1.1 jmcneill ifp->if_flags |= IFF_OACTIVE;
1184 1.1 jmcneill break;
1185 1.1 jmcneill }
1186 1.1 jmcneill enq = 1;
1187 1.1 jmcneill
1188 1.1 jmcneill /*
1189 1.1 jmcneill * If there's a BPF listener, bounce a copy of this frame
1190 1.1 jmcneill * to him.
1191 1.1 jmcneill */
1192 1.1 jmcneill bpf_mtap(ifp, m_head);
1193 1.1 jmcneill }
1194 1.1 jmcneill
1195 1.1 jmcneill if (enq) {
1196 1.1 jmcneill /* Sync descriptors. */
1197 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1198 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1199 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1200 1.1 jmcneill /* Kick. Assume we're using normal Tx priority queue. */
1201 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
1202 1.1 jmcneill (sc->alc_cdata.alc_tx_prod <<
1203 1.1 jmcneill MBOX_TD_PROD_LO_IDX_SHIFT) &
1204 1.1 jmcneill MBOX_TD_PROD_LO_IDX_MASK);
1205 1.1 jmcneill /* Set a timeout in case the chip goes out to lunch. */
1206 1.1 jmcneill ifp->if_timer = ALC_TX_TIMEOUT;
1207 1.1 jmcneill }
1208 1.1 jmcneill }
1209 1.1 jmcneill
1210 1.1 jmcneill static void
1211 1.1 jmcneill alc_watchdog(struct ifnet *ifp)
1212 1.1 jmcneill {
1213 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1214 1.1 jmcneill
1215 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1216 1.1 jmcneill printf("%s: watchdog timeout (missed link)\n",
1217 1.1 jmcneill device_xname(sc->sc_dev));
1218 1.1 jmcneill ifp->if_oerrors++;
1219 1.1 jmcneill alc_init(ifp);
1220 1.1 jmcneill return;
1221 1.1 jmcneill }
1222 1.1 jmcneill
1223 1.1 jmcneill printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1224 1.1 jmcneill ifp->if_oerrors++;
1225 1.1 jmcneill alc_init(ifp);
1226 1.1 jmcneill
1227 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1228 1.1 jmcneill alc_start(ifp);
1229 1.1 jmcneill }
1230 1.1 jmcneill
1231 1.1 jmcneill static int
1232 1.1 jmcneill alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1233 1.1 jmcneill {
1234 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1235 1.1 jmcneill int s, error = 0;
1236 1.1 jmcneill
1237 1.1 jmcneill s = splnet();
1238 1.1 jmcneill
1239 1.1 jmcneill error = ether_ioctl(ifp, cmd, data);
1240 1.1 jmcneill if (error == ENETRESET) {
1241 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING)
1242 1.1 jmcneill alc_iff(sc);
1243 1.1 jmcneill error = 0;
1244 1.1 jmcneill }
1245 1.1 jmcneill
1246 1.1 jmcneill splx(s);
1247 1.1 jmcneill return (error);
1248 1.1 jmcneill }
1249 1.1 jmcneill
1250 1.1 jmcneill static void
1251 1.1 jmcneill alc_mac_config(struct alc_softc *sc)
1252 1.1 jmcneill {
1253 1.1 jmcneill struct mii_data *mii;
1254 1.1 jmcneill uint32_t reg;
1255 1.1 jmcneill
1256 1.1 jmcneill mii = &sc->sc_miibus;
1257 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
1258 1.1 jmcneill reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
1259 1.1 jmcneill MAC_CFG_SPEED_MASK);
1260 1.1 jmcneill /* Reprogram MAC with resolved speed/duplex. */
1261 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
1262 1.1 jmcneill case IFM_10_T:
1263 1.1 jmcneill case IFM_100_TX:
1264 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
1265 1.1 jmcneill break;
1266 1.1 jmcneill case IFM_1000_T:
1267 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
1268 1.1 jmcneill break;
1269 1.1 jmcneill }
1270 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1271 1.1 jmcneill reg |= MAC_CFG_FULL_DUPLEX;
1272 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1273 1.1 jmcneill reg |= MAC_CFG_TX_FC;
1274 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1275 1.1 jmcneill reg |= MAC_CFG_RX_FC;
1276 1.1 jmcneill }
1277 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1278 1.1 jmcneill }
1279 1.1 jmcneill
1280 1.1 jmcneill static void
1281 1.1 jmcneill alc_stats_clear(struct alc_softc *sc)
1282 1.1 jmcneill {
1283 1.1 jmcneill struct smb sb, *smb;
1284 1.1 jmcneill uint32_t *reg;
1285 1.1 jmcneill int i;
1286 1.1 jmcneill
1287 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1288 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1289 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1290 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1291 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1292 1.1 jmcneill /* Update done, clear. */
1293 1.1 jmcneill smb->updated = 0;
1294 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1295 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1296 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1297 1.1 jmcneill } else {
1298 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1299 1.1 jmcneill reg++) {
1300 1.1 jmcneill CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1301 1.1 jmcneill i += sizeof(uint32_t);
1302 1.1 jmcneill }
1303 1.1 jmcneill /* Read Tx statistics. */
1304 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1305 1.1 jmcneill reg++) {
1306 1.1 jmcneill CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1307 1.1 jmcneill i += sizeof(uint32_t);
1308 1.1 jmcneill }
1309 1.1 jmcneill }
1310 1.1 jmcneill }
1311 1.1 jmcneill
1312 1.1 jmcneill static void
1313 1.1 jmcneill alc_stats_update(struct alc_softc *sc)
1314 1.1 jmcneill {
1315 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1316 1.1 jmcneill struct alc_hw_stats *stat;
1317 1.1 jmcneill struct smb sb, *smb;
1318 1.1 jmcneill uint32_t *reg;
1319 1.1 jmcneill int i;
1320 1.1 jmcneill
1321 1.1 jmcneill stat = &sc->alc_stats;
1322 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1323 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1324 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1325 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1326 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1327 1.1 jmcneill if (smb->updated == 0)
1328 1.1 jmcneill return;
1329 1.1 jmcneill } else {
1330 1.1 jmcneill smb = &sb;
1331 1.1 jmcneill /* Read Rx statistics. */
1332 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1333 1.1 jmcneill reg++) {
1334 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1335 1.1 jmcneill i += sizeof(uint32_t);
1336 1.1 jmcneill }
1337 1.1 jmcneill /* Read Tx statistics. */
1338 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1339 1.1 jmcneill reg++) {
1340 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1341 1.1 jmcneill i += sizeof(uint32_t);
1342 1.1 jmcneill }
1343 1.1 jmcneill }
1344 1.1 jmcneill
1345 1.1 jmcneill /* Rx stats. */
1346 1.1 jmcneill stat->rx_frames += smb->rx_frames;
1347 1.1 jmcneill stat->rx_bcast_frames += smb->rx_bcast_frames;
1348 1.1 jmcneill stat->rx_mcast_frames += smb->rx_mcast_frames;
1349 1.1 jmcneill stat->rx_pause_frames += smb->rx_pause_frames;
1350 1.1 jmcneill stat->rx_control_frames += smb->rx_control_frames;
1351 1.1 jmcneill stat->rx_crcerrs += smb->rx_crcerrs;
1352 1.1 jmcneill stat->rx_lenerrs += smb->rx_lenerrs;
1353 1.1 jmcneill stat->rx_bytes += smb->rx_bytes;
1354 1.1 jmcneill stat->rx_runts += smb->rx_runts;
1355 1.1 jmcneill stat->rx_fragments += smb->rx_fragments;
1356 1.1 jmcneill stat->rx_pkts_64 += smb->rx_pkts_64;
1357 1.1 jmcneill stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1358 1.1 jmcneill stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1359 1.1 jmcneill stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1360 1.1 jmcneill stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1361 1.1 jmcneill stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1362 1.1 jmcneill stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1363 1.1 jmcneill stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1364 1.1 jmcneill stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1365 1.1 jmcneill stat->rx_rrs_errs += smb->rx_rrs_errs;
1366 1.1 jmcneill stat->rx_alignerrs += smb->rx_alignerrs;
1367 1.1 jmcneill stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1368 1.1 jmcneill stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1369 1.1 jmcneill stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1370 1.1 jmcneill
1371 1.1 jmcneill /* Tx stats. */
1372 1.1 jmcneill stat->tx_frames += smb->tx_frames;
1373 1.1 jmcneill stat->tx_bcast_frames += smb->tx_bcast_frames;
1374 1.1 jmcneill stat->tx_mcast_frames += smb->tx_mcast_frames;
1375 1.1 jmcneill stat->tx_pause_frames += smb->tx_pause_frames;
1376 1.1 jmcneill stat->tx_excess_defer += smb->tx_excess_defer;
1377 1.1 jmcneill stat->tx_control_frames += smb->tx_control_frames;
1378 1.1 jmcneill stat->tx_deferred += smb->tx_deferred;
1379 1.1 jmcneill stat->tx_bytes += smb->tx_bytes;
1380 1.1 jmcneill stat->tx_pkts_64 += smb->tx_pkts_64;
1381 1.1 jmcneill stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
1382 1.1 jmcneill stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
1383 1.1 jmcneill stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
1384 1.1 jmcneill stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
1385 1.1 jmcneill stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
1386 1.1 jmcneill stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
1387 1.1 jmcneill stat->tx_single_colls += smb->tx_single_colls;
1388 1.1 jmcneill stat->tx_multi_colls += smb->tx_multi_colls;
1389 1.1 jmcneill stat->tx_late_colls += smb->tx_late_colls;
1390 1.1 jmcneill stat->tx_excess_colls += smb->tx_excess_colls;
1391 1.1 jmcneill stat->tx_abort += smb->tx_abort;
1392 1.1 jmcneill stat->tx_underrun += smb->tx_underrun;
1393 1.1 jmcneill stat->tx_desc_underrun += smb->tx_desc_underrun;
1394 1.1 jmcneill stat->tx_lenerrs += smb->tx_lenerrs;
1395 1.1 jmcneill stat->tx_pkts_truncated += smb->tx_pkts_truncated;
1396 1.1 jmcneill stat->tx_bcast_bytes += smb->tx_bcast_bytes;
1397 1.1 jmcneill stat->tx_mcast_bytes += smb->tx_mcast_bytes;
1398 1.1 jmcneill
1399 1.1 jmcneill /* Update counters in ifnet. */
1400 1.1 jmcneill ifp->if_opackets += smb->tx_frames;
1401 1.1 jmcneill
1402 1.1 jmcneill ifp->if_collisions += smb->tx_single_colls +
1403 1.1 jmcneill smb->tx_multi_colls * 2 + smb->tx_late_colls +
1404 1.1 jmcneill smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
1405 1.1 jmcneill
1406 1.1 jmcneill /*
1407 1.1 jmcneill * XXX
1408 1.1 jmcneill * tx_pkts_truncated counter looks suspicious. It constantly
1409 1.1 jmcneill * increments with no sign of Tx errors. This may indicate
1410 1.1 jmcneill * the counter name is not correct one so I've removed the
1411 1.1 jmcneill * counter in output errors.
1412 1.1 jmcneill */
1413 1.1 jmcneill ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
1414 1.1 jmcneill smb->tx_underrun;
1415 1.1 jmcneill
1416 1.1 jmcneill ifp->if_ipackets += smb->rx_frames;
1417 1.1 jmcneill
1418 1.1 jmcneill ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
1419 1.1 jmcneill smb->rx_runts + smb->rx_pkts_truncated +
1420 1.1 jmcneill smb->rx_fifo_oflows + smb->rx_rrs_errs +
1421 1.1 jmcneill smb->rx_alignerrs;
1422 1.1 jmcneill
1423 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1424 1.1 jmcneill /* Update done, clear. */
1425 1.1 jmcneill smb->updated = 0;
1426 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1427 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1428 1.1 jmcneill }
1429 1.1 jmcneill }
1430 1.1 jmcneill
1431 1.1 jmcneill static int
1432 1.1 jmcneill alc_intr(void *arg)
1433 1.1 jmcneill {
1434 1.1 jmcneill struct alc_softc *sc = arg;
1435 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1436 1.1 jmcneill uint32_t status;
1437 1.1 jmcneill
1438 1.1 jmcneill status = CSR_READ_4(sc, ALC_INTR_STATUS);
1439 1.1 jmcneill if ((status & ALC_INTRS) == 0)
1440 1.1 jmcneill return (0);
1441 1.1 jmcneill
1442 1.1 jmcneill /* Acknowledge and disable interrupts. */
1443 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
1444 1.1 jmcneill
1445 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING) {
1446 1.1 jmcneill if (status & INTR_RX_PKT) {
1447 1.1 jmcneill int error;
1448 1.1 jmcneill
1449 1.1 jmcneill error = alc_rxintr(sc);
1450 1.1 jmcneill if (error) {
1451 1.1 jmcneill alc_init(ifp);
1452 1.1 jmcneill return (0);
1453 1.1 jmcneill }
1454 1.1 jmcneill }
1455 1.1 jmcneill
1456 1.1 jmcneill if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
1457 1.1 jmcneill INTR_TXQ_TO_RST)) {
1458 1.1 jmcneill if (status & INTR_DMA_RD_TO_RST)
1459 1.1 jmcneill printf("%s: DMA read error! -- resetting\n",
1460 1.1 jmcneill device_xname(sc->sc_dev));
1461 1.1 jmcneill if (status & INTR_DMA_WR_TO_RST)
1462 1.1 jmcneill printf("%s: DMA write error! -- resetting\n",
1463 1.1 jmcneill device_xname(sc->sc_dev));
1464 1.1 jmcneill if (status & INTR_TXQ_TO_RST)
1465 1.1 jmcneill printf("%s: TxQ reset! -- resetting\n",
1466 1.1 jmcneill device_xname(sc->sc_dev));
1467 1.1 jmcneill alc_init(ifp);
1468 1.1 jmcneill return (0);
1469 1.1 jmcneill }
1470 1.1 jmcneill
1471 1.1 jmcneill alc_txeof(sc);
1472 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1473 1.1 jmcneill alc_start(ifp);
1474 1.1 jmcneill }
1475 1.1 jmcneill
1476 1.1 jmcneill /* Re-enable interrupts. */
1477 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
1478 1.1 jmcneill return (1);
1479 1.1 jmcneill }
1480 1.1 jmcneill
/*
 * Tx completion: reclaim mbufs and DMA maps for frames the hardware
 * has finished transmitting.
 *
 * The hardware's Tx consumer index is taken from the coalescing
 * message block (CMB) in host memory, or -- on chips with the CMB
 * bug -- directly from the mailbox register.
 */
static void
alc_txeof(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct alc_txdesc *txd;
	uint32_t cons, prod;
	int prog;

	if (sc->alc_cdata.alc_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
	    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
		/* Consumer index lives in the CMB; sync before reading. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		prod = sc->alc_rdata.alc_cmb->cons;
	} else
		prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
	/* Assume we're using normal Tx priority queue. */
	prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
	    MBOX_TD_CONS_LO_IDX_SHIFT;
	cons = sc->alc_cdata.alc_tx_cons;
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; cons != prod; prog++,
	    ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
		if (sc->alc_cdata.alc_tx_cnt <= 0)
			break;
		/*
		 * NOTE(review): prog is also advanced in the loop header,
		 * so it counts each descriptor twice; it is never read
		 * after the loop, so this is harmless.
		 */
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->alc_cdata.alc_tx_cnt--;
		txd = &sc->alc_cdata.alc_txdesc[cons];
		if (txd->tx_m != NULL) {
			/* Reclaim transmitted mbufs. */
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
		    sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	sc->alc_cdata.alc_tx_cons = cons;
	/*
	 * Unarm watchdog timer only when there is no pending
	 * frames in Tx queue.
	 */
	if (sc->alc_cdata.alc_tx_cnt == 0)
		ifp->if_timer = 0;
}
1536 1.1 jmcneill
1537 1.1 jmcneill static int
1538 1.1 jmcneill alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, int init)
1539 1.1 jmcneill {
1540 1.1 jmcneill struct mbuf *m;
1541 1.1 jmcneill bus_dmamap_t map;
1542 1.1 jmcneill int error;
1543 1.1 jmcneill
1544 1.1 jmcneill MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
1545 1.1 jmcneill if (m == NULL)
1546 1.1 jmcneill return (ENOBUFS);
1547 1.1 jmcneill MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
1548 1.1 jmcneill if (!(m->m_flags & M_EXT)) {
1549 1.1 jmcneill m_freem(m);
1550 1.1 jmcneill return (ENOBUFS);
1551 1.1 jmcneill }
1552 1.1 jmcneill
1553 1.1 jmcneill m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
1554 1.1 jmcneill
1555 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat,
1556 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
1557 1.1 jmcneill
1558 1.1 jmcneill if (error != 0) {
1559 1.1 jmcneill if (!error) {
1560 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat,
1561 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap);
1562 1.1 jmcneill error = EFBIG;
1563 1.1 jmcneill printf("%s: too many segments?!\n",
1564 1.1 jmcneill device_xname(sc->sc_dev));
1565 1.1 jmcneill }
1566 1.1 jmcneill m_freem(m);
1567 1.1 jmcneill
1568 1.1 jmcneill if (init)
1569 1.1 jmcneill printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
1570 1.1 jmcneill
1571 1.1 jmcneill return (error);
1572 1.1 jmcneill }
1573 1.1 jmcneill
1574 1.1 jmcneill if (rxd->rx_m != NULL) {
1575 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1576 1.1 jmcneill rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1577 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1578 1.1 jmcneill }
1579 1.1 jmcneill map = rxd->rx_dmamap;
1580 1.1 jmcneill rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
1581 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = map;
1582 1.1 jmcneill rxd->rx_m = m;
1583 1.1 jmcneill rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
1584 1.1 jmcneill return (0);
1585 1.1 jmcneill }
1586 1.1 jmcneill
/*
 * Drain the Rx return ring: hand each valid return descriptor to
 * alc_rxeof(), advance the consumer indices, and -- if any progress
 * was made -- tell the hardware about the newly available Rx buffers
 * via the mailbox register.
 *
 * Returns 0 on success, or EIO when the hardware reports a zero
 * segment count (caller resets the chip in that case).
 */
static int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	/* Pick up hardware updates to both rings before inspecting them. */
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			if (alcdebug)
				printf("%s: unexpected segment count -- "
				    "resetting\n", device_xname(sc->sc_dev));
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		/*
		 * NOTE(review): alc_rx_cons is wrapped with the return-ring
		 * count (ALC_RR_RING_CNT), not ALC_RX_RING_CNT; presumably
		 * the two rings are sized identically -- confirm against
		 * the ring-size definitions.
		 */
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}

	return (0);
}
1652 1.1 jmcneill
1653 1.1 jmcneill /* Receive a frame. */
1654 1.1 jmcneill static void
1655 1.1 jmcneill alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
1656 1.1 jmcneill {
1657 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1658 1.1 jmcneill struct alc_rxdesc *rxd;
1659 1.1 jmcneill struct mbuf *mp, *m;
1660 1.1 jmcneill uint32_t rdinfo, status;
1661 1.1 jmcneill int count, nsegs, rx_cons;
1662 1.1 jmcneill
1663 1.1 jmcneill status = le32toh(rrd->status);
1664 1.1 jmcneill rdinfo = le32toh(rrd->rdinfo);
1665 1.1 jmcneill rx_cons = RRD_RD_IDX(rdinfo);
1666 1.1 jmcneill nsegs = RRD_RD_CNT(rdinfo);
1667 1.1 jmcneill
1668 1.1 jmcneill sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
1669 1.1 jmcneill if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
1670 1.1 jmcneill /*
1671 1.1 jmcneill * We want to pass the following frames to upper
1672 1.1 jmcneill * layer regardless of error status of Rx return
1673 1.1 jmcneill * ring.
1674 1.1 jmcneill *
1675 1.1 jmcneill * o IP/TCP/UDP checksum is bad.
1676 1.1 jmcneill * o frame length and protocol specific length
1677 1.1 jmcneill * does not match.
1678 1.1 jmcneill *
1679 1.1 jmcneill * Force network stack compute checksum for
1680 1.1 jmcneill * errored frames.
1681 1.1 jmcneill */
1682 1.1 jmcneill status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
1683 1.1 jmcneill if ((RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC |
1684 1.1 jmcneill RRD_ERR_RUNT) != 0)
1685 1.1 jmcneill return;
1686 1.1 jmcneill }
1687 1.1 jmcneill
1688 1.1 jmcneill for (count = 0; count < nsegs; count++,
1689 1.1 jmcneill ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
1690 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
1691 1.1 jmcneill mp = rxd->rx_m;
1692 1.1 jmcneill /* Add a new receive buffer to the ring. */
1693 1.1 jmcneill if (alc_newbuf(sc, rxd, 0) != 0) {
1694 1.1 jmcneill ifp->if_iqdrops++;
1695 1.1 jmcneill /* Reuse Rx buffers. */
1696 1.1 jmcneill if (sc->alc_cdata.alc_rxhead != NULL)
1697 1.1 jmcneill m_freem(sc->alc_cdata.alc_rxhead);
1698 1.1 jmcneill break;
1699 1.1 jmcneill }
1700 1.1 jmcneill
1701 1.1 jmcneill /*
1702 1.1 jmcneill * Assume we've received a full sized frame.
1703 1.1 jmcneill * Actual size is fixed when we encounter the end of
1704 1.1 jmcneill * multi-segmented frame.
1705 1.1 jmcneill */
1706 1.1 jmcneill mp->m_len = sc->alc_buf_size;
1707 1.1 jmcneill
1708 1.1 jmcneill /* Chain received mbufs. */
1709 1.1 jmcneill if (sc->alc_cdata.alc_rxhead == NULL) {
1710 1.1 jmcneill sc->alc_cdata.alc_rxhead = mp;
1711 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1712 1.1 jmcneill } else {
1713 1.1 jmcneill mp->m_flags &= ~M_PKTHDR;
1714 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail =
1715 1.1 jmcneill sc->alc_cdata.alc_rxtail;
1716 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = mp;
1717 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1718 1.1 jmcneill }
1719 1.1 jmcneill
1720 1.1 jmcneill if (count == nsegs - 1) {
1721 1.1 jmcneill /* Last desc. for this frame. */
1722 1.1 jmcneill m = sc->alc_cdata.alc_rxhead;
1723 1.1 jmcneill m->m_flags |= M_PKTHDR;
1724 1.1 jmcneill /*
1725 1.1 jmcneill * It seems that L1C/L2C controller has no way
1726 1.1 jmcneill * to tell hardware to strip CRC bytes.
1727 1.1 jmcneill */
1728 1.1 jmcneill m->m_pkthdr.len =
1729 1.1 jmcneill sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
1730 1.1 jmcneill if (nsegs > 1) {
1731 1.1 jmcneill /* Set last mbuf size. */
1732 1.1 jmcneill mp->m_len = sc->alc_cdata.alc_rxlen -
1733 1.1 jmcneill (nsegs - 1) * sc->alc_buf_size;
1734 1.1 jmcneill /* Remove the CRC bytes in chained mbufs. */
1735 1.1 jmcneill if (mp->m_len <= ETHER_CRC_LEN) {
1736 1.1 jmcneill sc->alc_cdata.alc_rxtail =
1737 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail;
1738 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_len -=
1739 1.1 jmcneill (ETHER_CRC_LEN - mp->m_len);
1740 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = NULL;
1741 1.1 jmcneill m_freem(mp);
1742 1.1 jmcneill } else {
1743 1.1 jmcneill mp->m_len -= ETHER_CRC_LEN;
1744 1.1 jmcneill }
1745 1.1 jmcneill } else
1746 1.1 jmcneill m->m_len = m->m_pkthdr.len;
1747 1.1 jmcneill m->m_pkthdr.rcvif = ifp;
1748 1.1 jmcneill #if NVLAN > 0
1749 1.1 jmcneill /*
1750 1.1 jmcneill * Due to hardware bugs, Rx checksum offloading
1751 1.1 jmcneill * was intentionally disabled.
1752 1.1 jmcneill */
1753 1.1 jmcneill if (status & RRD_VLAN_TAG) {
1754 1.1 jmcneill u_int32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
1755 1.1 jmcneill VLAN_INPUT_TAG(ifp, m, ntohs(vtag), );
1756 1.1 jmcneill }
1757 1.1 jmcneill #endif
1758 1.1 jmcneill
1759 1.1 jmcneill bpf_mtap(ifp, m);
1760 1.1 jmcneill
1761 1.1 jmcneill {
1762 1.1 jmcneill /* Pass it on. */
1763 1.1 jmcneill ether_input(ifp, m);
1764 1.1 jmcneill }
1765 1.1 jmcneill }
1766 1.1 jmcneill }
1767 1.1 jmcneill /* Reset mbuf chains. */
1768 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
1769 1.1 jmcneill }
1770 1.1 jmcneill
1771 1.1 jmcneill static void
1772 1.1 jmcneill alc_tick(void *xsc)
1773 1.1 jmcneill {
1774 1.1 jmcneill struct alc_softc *sc = xsc;
1775 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
1776 1.1 jmcneill int s;
1777 1.1 jmcneill
1778 1.1 jmcneill s = splnet();
1779 1.1 jmcneill mii_tick(mii);
1780 1.1 jmcneill alc_stats_update(sc);
1781 1.1 jmcneill splx(s);
1782 1.1 jmcneill
1783 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
1784 1.1 jmcneill }
1785 1.1 jmcneill
1786 1.1 jmcneill static void
1787 1.1 jmcneill alc_reset(struct alc_softc *sc)
1788 1.1 jmcneill {
1789 1.1 jmcneill uint32_t reg;
1790 1.1 jmcneill int i;
1791 1.1 jmcneill
1792 1.1 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, MASTER_RESET);
1793 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
1794 1.1 jmcneill DELAY(10);
1795 1.1 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
1796 1.1 jmcneill break;
1797 1.1 jmcneill }
1798 1.1 jmcneill if (i == 0)
1799 1.1 jmcneill printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
1800 1.1 jmcneill
1801 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
1802 1.1 jmcneill if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
1803 1.1 jmcneill break;
1804 1.1 jmcneill DELAY(10);
1805 1.1 jmcneill }
1806 1.1 jmcneill
1807 1.1 jmcneill if (i == 0)
1808 1.1 jmcneill printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
1809 1.1 jmcneill reg);
1810 1.1 jmcneill }
1811 1.1 jmcneill
/*
 * (Re)initialize the controller and bring the interface up.
 *
 * Stops any in-flight I/O, resets the chip, rebuilds all descriptor
 * rings, then programs the station address, ring base addresses,
 * DMA/interrupt-moderation parameters, Tx/Rx queue configuration,
 * flow control thresholds and MAC configuration, and finally starts
 * the periodic tick and marks the interface running.
 *
 * Returns 0 on success, or an errno (with the interface stopped) if
 * the Rx ring could not be populated.  The register programming
 * order below follows the hardware bring-up sequence; do not reorder.
 */
static int
alc_init(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	bus_addr_t paddr;
	uint32_t reg, rxf_hi, rxf_lo;
	int error;

	/*
	 * Cancel any pending I/O.
	 */
	alc_stop(ifp, 0);
	/*
	 * Reset the chip to a known state.
	 */
	alc_reset(sc);

	/* Initialize Rx descriptors. */
	error = alc_init_rx_ring(sc);
	if (error != 0) {
		printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
		alc_stop(ifp, 0);
		return (error);
	}
	alc_init_rr_ring(sc);
	alc_init_tx_ring(sc);
	alc_init_cmb(sc);
	alc_init_smb(sc);

	/* Reprogram the station address. */
	memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
	CSR_WRITE_4(sc, ALC_PAR0,
	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
	CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
	/*
	 * Clear WOL status and disable all WOL feature as WOL
	 * would interfere Rx operation under normal environments.
	 */
	CSR_READ_4(sc, ALC_WOL_CFG);
	CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
	/* Set Tx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_tx_ring_paddr;
	CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We don't use high priority ring. */
	CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
	/* Set Tx descriptor counter. */
	CSR_WRITE_4(sc, ALC_TD_RING_CNT,
	    (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
	/* Set Rx descriptor base addresses. */
	paddr = sc->alc_rdata.alc_rx_ring_paddr;
	CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx ring. */
	CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
	/* Set Rx descriptor counter. */
	CSR_WRITE_4(sc, ALC_RD_RING_CNT,
	    (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);

	/*
	 * Let hardware split jumbo frames into alc_max_buf_sized chunks.
	 * if it do not fit the buffer size. Rx return descriptor holds
	 * a counter that indicates how many fragments were made by the
	 * hardware. The buffer size should be multiple of 8 bytes.
	 * Since hardware has limit on the size of buffer size, always
	 * use the maximum value.
	 * For strict-alignment architectures make sure to reduce buffer
	 * size by 8 bytes to make room for alignment fixup.
	 */
	sc->alc_buf_size = RX_BUF_SIZE_MAX;
	CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);

	paddr = sc->alc_rdata.alc_rr_ring_paddr;
	/* Set Rx return descriptor base addresses. */
	CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
	/* We use one Rx return ring. */
	CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
	CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
	/* Set Rx return descriptor counter. */
	CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
	    (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
	/* Set CMB and SMB (statistics) block base addresses. */
	paddr = sc->alc_rdata.alc_cmb_paddr;
	CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
	paddr = sc->alc_rdata.alc_smb_paddr;
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
	CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));

	/* Tell hardware that we're ready to load DMA blocks. */
	CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);

	/* Configure interrupt moderation timer. */
	sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
	sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
	reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
	reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
	CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
	reg = CSR_READ_4(sc, ALC_MASTER_CFG);
	reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK);
	/*
	 * We don't want to automatic interrupt clear as task queue
	 * for the interrupt should know interrupt status.
	 */
	reg &= ~MASTER_INTR_RD_CLR;
	reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB);
	if (ALC_USECS(sc->alc_int_rx_mod) != 0)
		reg |= MASTER_IM_RX_TIMER_ENB;
	if (ALC_USECS(sc->alc_int_tx_mod) != 0)
		reg |= MASTER_IM_TX_TIMER_ENB;
	CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
	/*
	 * Disable interrupt re-trigger timer. We don't want automatic
	 * re-triggering of un-ACKed interrupts.
	 */
	CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
	/* Configure CMB. */
	CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
	else
		CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
	/*
	 * Hardware can be configured to issue SMB interrupt based
	 * on programmed interval. Since there is a callout that is
	 * invoked for every hz in driver we use that instead of
	 * relying on periodic SMB interrupt.
	 */
	CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
	/* Clear MAC statistics. */
	alc_stats_clear(sc);

	/*
	 * Always use maximum frame size that controller can support.
	 * Otherwise received frames that has larger frame length
	 * than alc(4) MTU would be silently dropped in hardware. This
	 * would make path-MTU discovery hard as sender wouldn't get
	 * any responses from receiver. alc(4) supports
	 * multi-fragmented frames on Rx path so it has no issue on
	 * assembling fragmented frames. Using maximum frame size also
	 * removes the need to reinitialize hardware when interface
	 * MTU configuration was changed.
	 *
	 * Be conservative in what you do, be liberal in what you
	 * accept from others - RFC 793.
	 */
	CSR_WRITE_4(sc, ALC_FRAME_SIZE, ALC_JUMBO_FRAMELEN);

	/* Disable header split(?) */
	CSR_WRITE_4(sc, ALC_HDS_CFG, 0);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, ALC_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));
	/*
	 * Set TSO/checksum offload threshold. For frames that is
	 * larger than this threshold, hardware wouldn't do
	 * TSO/checksum offloading.
	 */
	CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
	    (ALC_JUMBO_FRAMELEN >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
	    TSO_OFFLOAD_THRESH_MASK);
	/* Configure TxQ. */
	reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
	    TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
	reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
	    TXQ_CFG_TD_BURST_MASK;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);

	/* Configure Rx free descriptor pre-fetching. */
	CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
	    ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
	    RX_RD_FREE_THRESH_HI_MASK) |
	    ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
	    RX_RD_FREE_THRESH_LO_MASK));

	/*
	 * Configure flow control parameters.
	 * XON  : 80% of Rx FIFO
	 * XOFF : 30% of Rx FIFO
	 */
	reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
	rxf_hi = (reg * 8) / 10;
	rxf_lo = (reg * 3)/ 10;
	CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
	    ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
	    RX_FIFO_PAUSE_THRESH_LO_MASK) |
	    ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
	    RX_FIFO_PAUSE_THRESH_HI_MASK));

	/* Disable RSS until I understand L1C/L2C's RSS logic. */
	CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
	CSR_WRITE_4(sc, ALC_RSS_CPU, 0);

	/* Configure RxQ. */
	reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
	    RXQ_CFG_RD_BURST_MASK;
	reg |= RXQ_CFG_RSS_MODE_DIS;
	if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
		reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M;
	CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);

	/* Configure Rx DMAW request threshold. */
	CSR_WRITE_4(sc, ALC_RD_DMA_CFG,
	    ((RD_DMA_CFG_THRESH_DEFAULT << RD_DMA_CFG_THRESH_SHIFT) &
	    RD_DMA_CFG_THRESH_MASK) |
	    ((ALC_RD_DMA_CFG_USECS(0) << RD_DMA_CFG_TIMER_SHIFT) &
	    RD_DMA_CFG_TIMER_MASK));
	/* Configure DMA parameters. */
	reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
	reg |= sc->alc_rcb;
	if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
		reg |= DMA_CFG_CMB_ENB;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
		reg |= DMA_CFG_SMB_ENB;
	else
		reg |= DMA_CFG_SMB_DIS;
	reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
	    DMA_CFG_RD_BURST_SHIFT;
	reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
	    DMA_CFG_WR_BURST_SHIFT;
	reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
	    DMA_CFG_RD_DELAY_CNT_MASK;
	reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
	    DMA_CFG_WR_DELAY_CNT_MASK;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);

	/*
	 * Configure Tx/Rx MACs.
	 *  - Auto-padding for short frames.
	 *  - Enable CRC generation.
	 *  Actual reconfiguration of MAC for resolved speed/duplex
	 *  is followed after detection of link establishment.
	 *  AR8131/AR8132 always does checksum computation regardless
	 *  of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
	 *  have bug in protocol field in Rx return structure so
	 *  these controllers can't handle fragmented frames. Disable
	 *  Rx checksum offloading until there is a newer controller
	 *  that has sane implementation.
	 */
	reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
	    ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
	    MAC_CFG_PREAMBLE_MASK);
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
		reg |= MAC_CFG_SPEED_10_100;
	else
		reg |= MAC_CFG_SPEED_1000;
	CSR_WRITE_4(sc, ALC_MAC_CFG, reg);

	/* Set up the receive filter. */
	alc_iff(sc);
	alc_rxvlan(sc);

	/* Acknowledge all pending interrupts and clear it. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);

	sc->alc_flags &= ~ALC_FLAG_LINK;
	/* Switch to the current media. */
	mii = &sc->sc_miibus;
	mii_mediachg(mii);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return (0);
}
2098 1.1 jmcneill
/*
 * Bring the interface down: stop the tick callout, mark the interface
 * not-running, quiesce DMA and the MAC engines in order, then release
 * every mbuf and DMA map still attached to the Tx and Rx rings.
 *
 * The teardown order (interrupts -> queues -> DMA -> MAC) mirrors the
 * hardware shutdown sequence; do not reorder.  "disable" is part of
 * the if_stop interface and is not used here.
 */
static void
alc_stop(struct ifnet *ifp, int disable)
{
	struct alc_softc *sc = ifp->if_softc;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->alc_flags &= ~ALC_FLAG_LINK;

	/* Harvest the final statistics counters. */
	alc_stats_update(sc);

	mii_down(&sc->sc_miibus);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);

	/* Disable DMA. */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim Rx buffers that have been processed. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
2164 1.1 jmcneill
2165 1.1 jmcneill static void
2166 1.1 jmcneill alc_stop_mac(struct alc_softc *sc)
2167 1.1 jmcneill {
2168 1.1 jmcneill uint32_t reg;
2169 1.1 jmcneill int i;
2170 1.1 jmcneill
2171 1.1 jmcneill /* Disable Rx/Tx MAC. */
2172 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2173 1.1 jmcneill if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2174 1.1 jmcneill reg &= ~MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
2175 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2176 1.1 jmcneill }
2177 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2178 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2179 1.1 jmcneill if (reg == 0)
2180 1.1 jmcneill break;
2181 1.1 jmcneill DELAY(10);
2182 1.1 jmcneill }
2183 1.1 jmcneill if (i == 0)
2184 1.1 jmcneill printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
2185 1.1 jmcneill device_xname(sc->sc_dev), reg);
2186 1.1 jmcneill }
2187 1.1 jmcneill
2188 1.1 jmcneill static void
2189 1.1 jmcneill alc_start_queue(struct alc_softc *sc)
2190 1.1 jmcneill {
2191 1.1 jmcneill uint32_t qcfg[] = {
2192 1.1 jmcneill 0,
2193 1.1 jmcneill RXQ_CFG_QUEUE0_ENB,
2194 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
2195 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
2196 1.1 jmcneill RXQ_CFG_ENB
2197 1.1 jmcneill };
2198 1.1 jmcneill uint32_t cfg;
2199 1.1 jmcneill
2200 1.1 jmcneill /* Enable RxQ. */
2201 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
2202 1.1 jmcneill cfg &= ~RXQ_CFG_ENB;
2203 1.1 jmcneill cfg |= qcfg[1];
2204 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
2205 1.1 jmcneill /* Enable TxQ. */
2206 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
2207 1.1 jmcneill cfg |= TXQ_CFG_ENB;
2208 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
2209 1.1 jmcneill }
2210 1.1 jmcneill
2211 1.1 jmcneill static void
2212 1.1 jmcneill alc_stop_queue(struct alc_softc *sc)
2213 1.1 jmcneill {
2214 1.1 jmcneill uint32_t reg;
2215 1.1 jmcneill int i;
2216 1.1 jmcneill
2217 1.1 jmcneill /* Disable RxQ. */
2218 1.1 jmcneill reg = CSR_READ_4(sc, ALC_RXQ_CFG);
2219 1.1 jmcneill if ((reg & RXQ_CFG_ENB) != 0) {
2220 1.1 jmcneill reg &= ~RXQ_CFG_ENB;
2221 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2222 1.1 jmcneill }
2223 1.1 jmcneill /* Disable TxQ. */
2224 1.1 jmcneill reg = CSR_READ_4(sc, ALC_TXQ_CFG);
2225 1.1 jmcneill if ((reg & TXQ_CFG_ENB) == 0) {
2226 1.1 jmcneill reg &= ~TXQ_CFG_ENB;
2227 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
2228 1.1 jmcneill }
2229 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2230 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2231 1.1 jmcneill if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2232 1.1 jmcneill break;
2233 1.1 jmcneill DELAY(10);
2234 1.1 jmcneill }
2235 1.1 jmcneill if (i == 0)
2236 1.1 jmcneill printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
2237 1.1 jmcneill device_xname(sc->sc_dev), reg);
2238 1.1 jmcneill }
2239 1.1 jmcneill
2240 1.1 jmcneill static void
2241 1.1 jmcneill alc_init_tx_ring(struct alc_softc *sc)
2242 1.1 jmcneill {
2243 1.1 jmcneill struct alc_ring_data *rd;
2244 1.1 jmcneill struct alc_txdesc *txd;
2245 1.1 jmcneill int i;
2246 1.1 jmcneill
2247 1.1 jmcneill sc->alc_cdata.alc_tx_prod = 0;
2248 1.1 jmcneill sc->alc_cdata.alc_tx_cons = 0;
2249 1.1 jmcneill sc->alc_cdata.alc_tx_cnt = 0;
2250 1.1 jmcneill
2251 1.1 jmcneill rd = &sc->alc_rdata;
2252 1.1 jmcneill memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
2253 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2254 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2255 1.1 jmcneill txd->tx_m = NULL;
2256 1.1 jmcneill }
2257 1.1 jmcneill
2258 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2259 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2260 1.1 jmcneill }
2261 1.1 jmcneill
/*
 * Initialize the Rx descriptor ring and attach a fresh mbuf to every
 * slot via alc_newbuf().  Returns 0 on success or ENOBUFS if an Rx
 * buffer could not be allocated.
 */
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	/* Prime the consumer at the last slot before handing it to hw. */
	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver does not have to read Rx descriptors back, so
	 * BUS_DMASYNC_PREWRITE is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}
2292 1.1 jmcneill
2293 1.1 jmcneill static void
2294 1.1 jmcneill alc_init_rr_ring(struct alc_softc *sc)
2295 1.1 jmcneill {
2296 1.1 jmcneill struct alc_ring_data *rd;
2297 1.1 jmcneill
2298 1.1 jmcneill sc->alc_cdata.alc_rr_cons = 0;
2299 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2300 1.1 jmcneill
2301 1.1 jmcneill rd = &sc->alc_rdata;
2302 1.1 jmcneill memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
2303 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2304 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2305 1.1 jmcneill }
2306 1.1 jmcneill
2307 1.1 jmcneill static void
2308 1.1 jmcneill alc_init_cmb(struct alc_softc *sc)
2309 1.1 jmcneill {
2310 1.1 jmcneill struct alc_ring_data *rd;
2311 1.1 jmcneill
2312 1.1 jmcneill rd = &sc->alc_rdata;
2313 1.1 jmcneill memset(rd->alc_cmb, 0, ALC_CMB_SZ);
2314 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2315 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2316 1.1 jmcneill }
2317 1.1 jmcneill
2318 1.1 jmcneill static void
2319 1.1 jmcneill alc_init_smb(struct alc_softc *sc)
2320 1.1 jmcneill {
2321 1.1 jmcneill struct alc_ring_data *rd;
2322 1.1 jmcneill
2323 1.1 jmcneill rd = &sc->alc_rdata;
2324 1.1 jmcneill memset(rd->alc_smb, 0, ALC_SMB_SZ);
2325 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2326 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2327 1.1 jmcneill }
2328 1.1 jmcneill
2329 1.1 jmcneill static void
2330 1.1 jmcneill alc_rxvlan(struct alc_softc *sc)
2331 1.1 jmcneill {
2332 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
2333 1.1 jmcneill uint32_t reg;
2334 1.1 jmcneill
2335 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2336 1.1 jmcneill if (ifp->if_capabilities & ETHERCAP_VLAN_HWTAGGING)
2337 1.1 jmcneill reg |= MAC_CFG_VLAN_TAG_STRIP;
2338 1.1 jmcneill else
2339 1.1 jmcneill reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2340 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2341 1.1 jmcneill }
2342 1.1 jmcneill
2343 1.1 jmcneill static void
2344 1.1 jmcneill alc_iff(struct alc_softc *sc)
2345 1.1 jmcneill {
2346 1.1 jmcneill struct ethercom *ec = &sc->sc_ec;
2347 1.1 jmcneill struct ifnet *ifp = &ec->ec_if;
2348 1.1 jmcneill struct ether_multi *enm;
2349 1.1 jmcneill struct ether_multistep step;
2350 1.1 jmcneill uint32_t crc;
2351 1.1 jmcneill uint32_t mchash[2];
2352 1.1 jmcneill uint32_t rxcfg;
2353 1.1 jmcneill
2354 1.1 jmcneill rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2355 1.1 jmcneill rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2356 1.1 jmcneill ifp->if_flags &= ~IFF_ALLMULTI;
2357 1.1 jmcneill
2358 1.1 jmcneill /*
2359 1.1 jmcneill * Always accept broadcast frames.
2360 1.1 jmcneill */
2361 1.1 jmcneill rxcfg |= MAC_CFG_BCAST;
2362 1.1 jmcneill
2363 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2364 1.1 jmcneill ifp->if_flags |= IFF_ALLMULTI;
2365 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC)
2366 1.1 jmcneill rxcfg |= MAC_CFG_PROMISC;
2367 1.1 jmcneill else
2368 1.1 jmcneill rxcfg |= MAC_CFG_ALLMULTI;
2369 1.1 jmcneill mchash[0] = mchash[1] = 0xFFFFFFFF;
2370 1.1 jmcneill } else {
2371 1.1 jmcneill /* Program new filter. */
2372 1.1 jmcneill memset(mchash, 0, sizeof(mchash));
2373 1.1 jmcneill
2374 1.1 jmcneill ETHER_FIRST_MULTI(step, ec, enm);
2375 1.1 jmcneill while (enm != NULL) {
2376 1.1 jmcneill crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2377 1.1 jmcneill mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2378 1.1 jmcneill ETHER_NEXT_MULTI(step, enm);
2379 1.1 jmcneill }
2380 1.1 jmcneill }
2381 1.1 jmcneill
2382 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2383 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2384 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2385 1.1 jmcneill }
2386 1.1 jmcneill
/* Loadable kernel module declaration for the alc(4) driver. */
MODULE(MODULE_CLASS_DRIVER, if_alc, NULL);

#ifdef _MODULE
/* Autogenerated autoconf glue (cfdriver/cfattach/cfdata tables). */
#include "ioconf.c"
#endif
2392 1.1 jmcneill
2393 1.1 jmcneill static int
2394 1.1 jmcneill if_alc_modcmd(modcmd_t cmd, void *opaque)
2395 1.1 jmcneill {
2396 1.1 jmcneill int error = 0;
2397 1.1 jmcneill
2398 1.1 jmcneill switch (cmd) {
2399 1.1 jmcneill case MODULE_CMD_INIT:
2400 1.1 jmcneill #ifdef _MODULE
2401 1.1 jmcneill error = config_init_component(cfdriver_ioconf_if_alc,
2402 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2403 1.1 jmcneill #endif
2404 1.1 jmcneill return error;
2405 1.1 jmcneill case MODULE_CMD_FINI:
2406 1.1 jmcneill #ifdef _MODULE
2407 1.1 jmcneill error = config_fini_component(cfdriver_ioconf_if_alc,
2408 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2409 1.1 jmcneill #endif
2410 1.1 jmcneill return error;
2411 1.1 jmcneill default:
2412 1.1 jmcneill return ENOTTY;
2413 1.1 jmcneill }
2414 1.1 jmcneill }
2415