if_alc.c revision 1.5 1 1.1 jmcneill /* $OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $ */
2 1.1 jmcneill /*-
3 1.1 jmcneill * Copyright (c) 2009, Pyun YongHyeon <yongari (at) FreeBSD.org>
4 1.1 jmcneill * All rights reserved.
5 1.1 jmcneill *
6 1.1 jmcneill * Redistribution and use in source and binary forms, with or without
7 1.1 jmcneill * modification, are permitted provided that the following conditions
8 1.1 jmcneill * are met:
9 1.1 jmcneill * 1. Redistributions of source code must retain the above copyright
10 1.1 jmcneill * notice unmodified, this list of conditions, and the following
11 1.1 jmcneill * disclaimer.
12 1.1 jmcneill * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 jmcneill * notice, this list of conditions and the following disclaimer in the
14 1.1 jmcneill * documentation and/or other materials provided with the distribution.
15 1.1 jmcneill *
16 1.1 jmcneill * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.1 jmcneill * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.1 jmcneill * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.1 jmcneill * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.1 jmcneill * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.1 jmcneill * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.1 jmcneill * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.1 jmcneill * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.1 jmcneill * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.1 jmcneill * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.1 jmcneill * SUCH DAMAGE.
27 1.1 jmcneill */
28 1.1 jmcneill
29 1.2 jmcneill /* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
30 1.1 jmcneill
31 1.1 jmcneill #ifdef _KERNEL_OPT
32 1.1 jmcneill #include "vlan.h"
33 1.1 jmcneill #endif
34 1.1 jmcneill
35 1.1 jmcneill #include <sys/param.h>
36 1.1 jmcneill #include <sys/proc.h>
37 1.1 jmcneill #include <sys/endian.h>
38 1.1 jmcneill #include <sys/systm.h>
39 1.1 jmcneill #include <sys/types.h>
40 1.1 jmcneill #include <sys/sockio.h>
41 1.1 jmcneill #include <sys/mbuf.h>
42 1.1 jmcneill #include <sys/queue.h>
43 1.1 jmcneill #include <sys/kernel.h>
44 1.1 jmcneill #include <sys/device.h>
45 1.1 jmcneill #include <sys/callout.h>
46 1.1 jmcneill #include <sys/socket.h>
47 1.1 jmcneill #include <sys/module.h>
48 1.1 jmcneill
49 1.1 jmcneill #include <sys/bus.h>
50 1.1 jmcneill
51 1.1 jmcneill #include <net/if.h>
52 1.1 jmcneill #include <net/if_dl.h>
53 1.1 jmcneill #include <net/if_llc.h>
54 1.1 jmcneill #include <net/if_media.h>
55 1.1 jmcneill #include <net/if_ether.h>
56 1.1 jmcneill
57 1.1 jmcneill #include <net/bpf.h>
58 1.1 jmcneill
59 1.1 jmcneill #ifdef INET
60 1.1 jmcneill #include <netinet/in.h>
61 1.1 jmcneill #include <netinet/in_systm.h>
62 1.1 jmcneill #include <netinet/in_var.h>
63 1.1 jmcneill #include <netinet/ip.h>
64 1.1 jmcneill #endif
65 1.1 jmcneill
66 1.1 jmcneill #include <net/if_types.h>
67 1.1 jmcneill #include <net/if_vlanvar.h>
68 1.1 jmcneill
69 1.1 jmcneill #include <net/bpf.h>
70 1.1 jmcneill
71 1.1 jmcneill #include <sys/rnd.h>
72 1.1 jmcneill
73 1.1 jmcneill #include <dev/mii/mii.h>
74 1.1 jmcneill #include <dev/mii/miivar.h>
75 1.1 jmcneill
76 1.1 jmcneill #include <dev/pci/pcireg.h>
77 1.1 jmcneill #include <dev/pci/pcivar.h>
78 1.1 jmcneill #include <dev/pci/pcidevs.h>
79 1.1 jmcneill
80 1.1 jmcneill #include <dev/pci/if_alcreg.h>
81 1.1 jmcneill
/*
 * Devices supported by this driver.
 *
 * Third field is a per-chip size limit (9KB on AR813x, 6KB on AR815x)
 * -- presumably the maximum frame length; confirm against the
 * struct alc_ident declaration in if_alcreg.h.
 * The all-zero entry terminates the table (see alc_find_ident()).
 */
static struct alc_ident alc_ident_table[] = {
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
	    "Atheros AR8131 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
	    "Atheros AR8132 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
	    "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
	    "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
	    "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
	    "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ 0, 0, 0, NULL },
};
100 1.2 jmcneill
/* Autoconfiguration entry points. */
static int	alc_match(device_t, cfdata_t, void *);
static void	alc_attach(device_t, device_t, void *);
static int	alc_detach(device_t, int);

/* ifnet / ifmedia entry points. */
static int	alc_init(struct ifnet *);
static void	alc_start(struct ifnet *);
static int	alc_ioctl(struct ifnet *, u_long, void *);
static void	alc_watchdog(struct ifnet *);
static int	alc_mediachange(struct ifnet *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);

/* Internal helpers. */
static void	alc_aspm(struct alc_softc *, int);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(struct pci_attach_args *);
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static int	alc_intr(void *);
static void	alc_mac_config(struct alc_softc *);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static void	alc_miibus_writereg(device_t, int, int, int);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *, int);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_reset(struct alc_softc *);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *);
static void	alc_iff(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct ifnet *, int);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);

/*
 * DMA burst sizes in bytes, indexed by the 3-bit PCIe
 * MaxReadRequest/MaxPayload encodings read in alc_attach();
 * the trailing 0 terminates the table.
 */
uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
    alc_match, alc_attach, alc_detach, NULL);

/* Set alcdebug non-zero (e.g. from ddb) to enable DPRINTF() output. */
int alcdebug = 0;
#define DPRINTF(x)	do { if (alcdebug) printf x; } while (0)

/* Rx buffer pad so the IP header lands on a 32-bit boundary. */
#define ETHER_ALIGN		2
/* Hardware-assisted Tx checksum offloads supported by this driver. */
#define ALC_CSUM_FEATURES	(M_CSUM_TCPv4 | M_CSUM_UDPv4)
157 1.1 jmcneill
158 1.1 jmcneill static int
159 1.1 jmcneill alc_miibus_readreg(device_t dev, int phy, int reg)
160 1.1 jmcneill {
161 1.1 jmcneill struct alc_softc *sc = device_private(dev);
162 1.1 jmcneill uint32_t v;
163 1.1 jmcneill int i;
164 1.1 jmcneill
165 1.1 jmcneill if (phy != sc->alc_phyaddr)
166 1.1 jmcneill return (0);
167 1.1 jmcneill
168 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
169 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
170 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
171 1.1 jmcneill DELAY(5);
172 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
173 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
174 1.1 jmcneill break;
175 1.1 jmcneill }
176 1.1 jmcneill
177 1.1 jmcneill if (i == 0) {
178 1.1 jmcneill printf("%s: phy read timeout: phy %d, reg %d\n",
179 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
180 1.1 jmcneill return (0);
181 1.1 jmcneill }
182 1.1 jmcneill
183 1.1 jmcneill return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
184 1.1 jmcneill }
185 1.1 jmcneill
186 1.1 jmcneill static void
187 1.1 jmcneill alc_miibus_writereg(device_t dev, int phy, int reg, int val)
188 1.1 jmcneill {
189 1.1 jmcneill struct alc_softc *sc = device_private(dev);
190 1.1 jmcneill uint32_t v;
191 1.1 jmcneill int i;
192 1.1 jmcneill
193 1.1 jmcneill if (phy != sc->alc_phyaddr)
194 1.1 jmcneill return;
195 1.1 jmcneill
196 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
197 1.1 jmcneill (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
198 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
199 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
200 1.1 jmcneill DELAY(5);
201 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
202 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
203 1.1 jmcneill break;
204 1.1 jmcneill }
205 1.1 jmcneill
206 1.1 jmcneill if (i == 0)
207 1.1 jmcneill printf("%s: phy write timeout: phy %d, reg %d\n",
208 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
209 1.1 jmcneill }
210 1.1 jmcneill
/*
 * MII status-change callback.
 *
 * Called by the MII layer when the negotiated link state changes.
 * Re-derives the driver's link flag from the media status, always
 * halts the Tx/Rx queues and MACs, and -- only if a usable link is
 * present -- restarts the queues, reprograms the MAC for the resolved
 * speed/duplex and re-enables the data path.  The stop/reconfigure/
 * start ordering matters; do not reorder these calls.
 */
static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;
	uint32_t reg;

	/* Nothing to do unless the interface is up and running. */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			/* 1000baseT only counts on non-fast-ethernet parts. */
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	alc_stop_queue(sc);
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
		/* Re-tune ASPM for the newly negotiated media. */
		alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
	}
}
255 1.1 jmcneill
256 1.1 jmcneill static void
257 1.1 jmcneill alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
258 1.1 jmcneill {
259 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
260 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
261 1.1 jmcneill
262 1.1 jmcneill mii_pollstat(mii);
263 1.1 jmcneill ifmr->ifm_status = mii->mii_media_status;
264 1.1 jmcneill ifmr->ifm_active = mii->mii_media_active;
265 1.1 jmcneill }
266 1.1 jmcneill
267 1.1 jmcneill static int
268 1.1 jmcneill alc_mediachange(struct ifnet *ifp)
269 1.1 jmcneill {
270 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
271 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
272 1.1 jmcneill int error;
273 1.1 jmcneill
274 1.1 jmcneill if (mii->mii_instance != 0) {
275 1.1 jmcneill struct mii_softc *miisc;
276 1.1 jmcneill
277 1.1 jmcneill LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
278 1.1 jmcneill mii_phy_reset(miisc);
279 1.1 jmcneill }
280 1.1 jmcneill error = mii_mediachg(mii);
281 1.1 jmcneill
282 1.1 jmcneill return (error);
283 1.1 jmcneill }
284 1.1 jmcneill
285 1.2 jmcneill static struct alc_ident *
286 1.2 jmcneill alc_find_ident(struct pci_attach_args *pa)
287 1.2 jmcneill {
288 1.2 jmcneill struct alc_ident *ident;
289 1.2 jmcneill uint16_t vendor, devid;
290 1.2 jmcneill
291 1.2 jmcneill vendor = PCI_VENDOR(pa->pa_id);
292 1.2 jmcneill devid = PCI_PRODUCT(pa->pa_id);
293 1.2 jmcneill for (ident = alc_ident_table; ident->name != NULL; ident++) {
294 1.2 jmcneill if (vendor == ident->vendorid && devid == ident->deviceid)
295 1.2 jmcneill return (ident);
296 1.2 jmcneill }
297 1.2 jmcneill
298 1.2 jmcneill return (NULL);
299 1.2 jmcneill }
300 1.2 jmcneill
301 1.1 jmcneill static int
302 1.1 jmcneill alc_match(device_t dev, cfdata_t match, void *aux)
303 1.1 jmcneill {
304 1.1 jmcneill struct pci_attach_args *pa = aux;
305 1.1 jmcneill
306 1.2 jmcneill return alc_find_ident(pa) != NULL;
307 1.1 jmcneill }
308 1.1 jmcneill
/*
 * Read the station address from the controller into sc->alc_eaddr.
 *
 * If an external EEPROM is detected, the TWSI block is asked to reload
 * the EEPROM configuration, which (per the vendor reference code)
 * programs the PAR0/PAR1 station-address registers.  The per-chip
 * clock-enable and PHY debug-register fiddling around the reload is
 * vendor magic carried over from the reference driver; the exact bit
 * semantics are undocumented.
 */
static void
alc_get_macaddr(struct alc_softc *sc)
{
	uint32_t ea[2], opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			/* Make sure the clock is enabled for the reload. */
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				/* Read back to flush the write. */
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			/*
			 * Vendor magic: clear bit 7 of PHY debug reg 0x00
			 * and set bit 3 of debug reg 0x3B before reload
			 * (undone again below after the reload).
			 */
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		/* Trigger the reload and wait up to ~100ms for completion. */
		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (alcdebug)
			printf("%s: EEPROM not found!\n", device_xname(sc->sc_dev));
	}
	if (eeprom != 0) {
		/* Undo the pre-reload tweaks made above. */
		switch (sc->alc_ident->deviceid) {
		case PCI_PRODUCT_ATTANSIC_AR8131:
		case PCI_PRODUCT_ATTANSIC_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case PCI_PRODUCT_ATTANSIC_AR8151:
		case PCI_PRODUCT_ATTANSIC_AR8151_V2:
		case PCI_PRODUCT_ATTANSIC_AR8152_B:
		case PCI_PRODUCT_ATTANSIC_AR8152_B2:
			/* Set bit 7 of debug reg 0x00, clear bit 3 of 0x3B. */
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	/* PAR1 holds the two high bytes, PAR0 the four low bytes. */
	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}
416 1.1 jmcneill
417 1.1 jmcneill static void
418 1.1 jmcneill alc_disable_l0s_l1(struct alc_softc *sc)
419 1.1 jmcneill {
420 1.1 jmcneill uint32_t pmcfg;
421 1.1 jmcneill
422 1.1 jmcneill /* Another magic from vendor. */
423 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
424 1.1 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
425 1.1 jmcneill PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
426 1.1 jmcneill PM_CFG_SERDES_PD_EX_L1);
427 1.1 jmcneill pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
428 1.1 jmcneill PM_CFG_SERDES_L1_ENB;
429 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
430 1.1 jmcneill }
431 1.1 jmcneill
432 1.1 jmcneill static void
433 1.1 jmcneill alc_phy_reset(struct alc_softc *sc)
434 1.1 jmcneill {
435 1.1 jmcneill uint16_t data;
436 1.1 jmcneill
437 1.1 jmcneill /* Reset magic from Linux. */
438 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
439 1.1 jmcneill GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
440 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
441 1.1 jmcneill DELAY(10 * 1000);
442 1.1 jmcneill
443 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
444 1.1 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
445 1.1 jmcneill GPHY_CFG_SEL_ANA_RESET);
446 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
447 1.1 jmcneill DELAY(10 * 1000);
448 1.1 jmcneill
449 1.2 jmcneill /* DSP fixup, Vendor magic. */
450 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
451 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
452 1.2 jmcneill ALC_MII_DBG_ADDR, 0x000A);
453 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
454 1.2 jmcneill ALC_MII_DBG_DATA);
455 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
456 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xDFFF);
457 1.2 jmcneill }
458 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
459 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
460 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
461 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
462 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
463 1.2 jmcneill ALC_MII_DBG_ADDR, 0x003B);
464 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
465 1.2 jmcneill ALC_MII_DBG_DATA);
466 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
467 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xFFF7);
468 1.2 jmcneill DELAY(20 * 1000);
469 1.2 jmcneill }
470 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
471 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
472 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
473 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
474 1.2 jmcneill ALC_MII_DBG_DATA, 0x929D);
475 1.2 jmcneill }
476 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
477 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
478 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
479 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
480 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
481 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
482 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
483 1.2 jmcneill ALC_MII_DBG_DATA, 0xB6DD);
484 1.2 jmcneill }
485 1.2 jmcneill
486 1.1 jmcneill /* Load DSP codes, vendor magic. */
487 1.1 jmcneill data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
488 1.1 jmcneill ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
489 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
490 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG18);
491 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
492 1.1 jmcneill ALC_MII_DBG_DATA, data);
493 1.1 jmcneill
494 1.1 jmcneill data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
495 1.1 jmcneill ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
496 1.1 jmcneill ANA_SERDES_EN_LCKDT;
497 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
498 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG5);
499 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
500 1.1 jmcneill ALC_MII_DBG_DATA, data);
501 1.1 jmcneill
502 1.1 jmcneill data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
503 1.1 jmcneill ANA_LONG_CABLE_TH_100_MASK) |
504 1.1 jmcneill ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
505 1.1 jmcneill ANA_SHORT_CABLE_TH_100_SHIFT) |
506 1.1 jmcneill ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
507 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
508 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG54);
509 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
510 1.1 jmcneill ALC_MII_DBG_DATA, data);
511 1.1 jmcneill
512 1.1 jmcneill data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
513 1.1 jmcneill ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
514 1.1 jmcneill ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
515 1.1 jmcneill ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
516 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
517 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG4);
518 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
519 1.1 jmcneill ALC_MII_DBG_DATA, data);
520 1.1 jmcneill
521 1.1 jmcneill data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
522 1.1 jmcneill ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
523 1.1 jmcneill ANA_OEN_125M;
524 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
525 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG0);
526 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
527 1.1 jmcneill ALC_MII_DBG_DATA, data);
528 1.1 jmcneill DELAY(1000);
529 1.1 jmcneill }
530 1.1 jmcneill
531 1.1 jmcneill static void
532 1.1 jmcneill alc_phy_down(struct alc_softc *sc)
533 1.1 jmcneill {
534 1.2 jmcneill switch (sc->alc_ident->deviceid) {
535 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
536 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
537 1.2 jmcneill /*
538 1.2 jmcneill * GPHY power down caused more problems on AR8151 v2.0.
539 1.2 jmcneill * When driver is reloaded after GPHY power down,
540 1.2 jmcneill * accesses to PHY/MAC registers hung the system. Only
541 1.2 jmcneill * cold boot recovered from it. I'm not sure whether
542 1.2 jmcneill * AR8151 v1.0 also requires this one though. I don't
543 1.2 jmcneill * have AR8151 v1.0 controller in hand.
544 1.2 jmcneill * The only option left is to isolate the PHY and
545 1.2 jmcneill * initiates power down the PHY which in turn saves
546 1.2 jmcneill * more power when driver is unloaded.
547 1.2 jmcneill */
548 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
549 1.2 jmcneill MII_BMCR, BMCR_ISO | BMCR_PDOWN);
550 1.2 jmcneill break;
551 1.2 jmcneill default:
552 1.2 jmcneill /* Force PHY down. */
553 1.2 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
554 1.2 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
555 1.2 jmcneill GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
556 1.2 jmcneill GPHY_CFG_PWDOWN_HW);
557 1.2 jmcneill DELAY(1000);
558 1.2 jmcneill break;
559 1.2 jmcneill }
560 1.1 jmcneill }
561 1.1 jmcneill
/*
 * Configure PCIe Active State Power Management.
 *
 * 'media' is the current IFM_SUBTYPE() when a link is up (it selects a
 * speed-dependent L1 entry timer); it is ignored when no link is
 * present.  The PM_CFG bit recipes follow the vendor reference driver.
 * Note: the PCIe Link Control register is accessed through the chip's
 * own register window (CSR_READ_2/CSR_WRITE_2 at alc_expcap +
 * PCI_PCIE_LCSR) rather than via pci_conf_read -- presumably the
 * device mirrors its config space; confirm against if_alcreg.h.
 */
static void
alc_aspm(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	/* Only touch the Link Control register on APS-capable PCIe parts. */
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCI_PCIE_LCSR);
	else
		linkcfg = 0;
	/* Common baseline: clear timers/ASPM enables, set defaults. */
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~0x80;
		if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= 0x80;
		CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up: enable the ASPM states the chip supports. */
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    PCI_PRODUCT_ATTANSIC_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			/* Speed-dependent L1 entry timer (vendor values). */
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case PCI_PRODUCT_ATTANSIC_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case PCI_PRODUCT_ATTANSIC_AR8152_B2:
				case PCI_PRODUCT_ATTANSIC_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		/* Link down: SerDes L1 off, clock switch in L1 on. */
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
646 1.1 jmcneill
647 1.1 jmcneill static void
648 1.1 jmcneill alc_attach(device_t parent, device_t self, void *aux)
649 1.1 jmcneill {
650 1.1 jmcneill
651 1.1 jmcneill struct alc_softc *sc = device_private(self);
652 1.1 jmcneill struct pci_attach_args *pa = aux;
653 1.1 jmcneill pci_chipset_tag_t pc = pa->pa_pc;
654 1.1 jmcneill pci_intr_handle_t ih;
655 1.1 jmcneill const char *intrstr;
656 1.1 jmcneill struct ifnet *ifp;
657 1.1 jmcneill pcireg_t memtype;
658 1.2 jmcneill const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
659 1.1 jmcneill uint16_t burst;
660 1.1 jmcneill int base, mii_flags, state, error = 0;
661 1.1 jmcneill uint32_t cap, ctl, val;
662 1.1 jmcneill
663 1.2 jmcneill sc->alc_ident = alc_find_ident(pa);
664 1.2 jmcneill
665 1.1 jmcneill aprint_naive("\n");
666 1.2 jmcneill aprint_normal(": %s\n", sc->alc_ident->name);
667 1.1 jmcneill
668 1.1 jmcneill sc->sc_dev = self;
669 1.1 jmcneill sc->sc_dmat = pa->pa_dmat;
670 1.1 jmcneill sc->sc_pct = pa->pa_pc;
671 1.1 jmcneill sc->sc_pcitag = pa->pa_tag;
672 1.1 jmcneill
673 1.1 jmcneill /*
674 1.1 jmcneill * Allocate IO memory
675 1.1 jmcneill */
676 1.1 jmcneill memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
677 1.1 jmcneill switch (memtype) {
678 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
679 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
680 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
681 1.1 jmcneill break;
682 1.1 jmcneill default:
683 1.1 jmcneill aprint_error_dev(self, "invalid base address register\n");
684 1.1 jmcneill break;
685 1.1 jmcneill }
686 1.1 jmcneill
687 1.1 jmcneill if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
688 1.1 jmcneill &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
689 1.1 jmcneill aprint_error_dev(self, "could not map mem space\n");
690 1.1 jmcneill return;
691 1.1 jmcneill }
692 1.1 jmcneill
693 1.1 jmcneill if (pci_intr_map(pa, &ih) != 0) {
694 1.1 jmcneill printf(": can't map interrupt\n");
695 1.1 jmcneill goto fail;
696 1.1 jmcneill }
697 1.1 jmcneill
698 1.1 jmcneill /*
699 1.1 jmcneill * Allocate IRQ
700 1.1 jmcneill */
701 1.1 jmcneill intrstr = pci_intr_string(sc->sc_pct, ih);
702 1.1 jmcneill sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc);
703 1.1 jmcneill if (sc->sc_irq_handle == NULL) {
704 1.1 jmcneill printf(": could not establish interrupt");
705 1.1 jmcneill if (intrstr != NULL)
706 1.1 jmcneill printf(" at %s", intrstr);
707 1.1 jmcneill printf("\n");
708 1.1 jmcneill goto fail;
709 1.1 jmcneill }
710 1.4 matt aprint_normal_dev(self, "interrupting at %s\n", intrstr);
711 1.1 jmcneill
712 1.1 jmcneill /* Set PHY address. */
713 1.1 jmcneill sc->alc_phyaddr = ALC_PHY_ADDR;
714 1.1 jmcneill
715 1.1 jmcneill /* Initialize DMA parameters. */
716 1.1 jmcneill sc->alc_dma_rd_burst = 0;
717 1.1 jmcneill sc->alc_dma_wr_burst = 0;
718 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_64;
719 1.1 jmcneill if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
720 1.1 jmcneill &base, NULL)) {
721 1.1 jmcneill sc->alc_flags |= ALC_FLAG_PCIE;
722 1.2 jmcneill sc->alc_expcap = base;
723 1.1 jmcneill burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
724 1.1 jmcneill base + PCI_PCIE_DCSR) >> 16;
725 1.1 jmcneill sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
726 1.1 jmcneill sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
727 1.1 jmcneill if (alcdebug) {
728 1.1 jmcneill printf("%s: Read request size : %u bytes.\n",
729 1.1 jmcneill device_xname(sc->sc_dev),
730 1.1 jmcneill alc_dma_burst[sc->alc_dma_rd_burst]);
731 1.1 jmcneill printf("%s: TLP payload size : %u bytes.\n",
732 1.1 jmcneill device_xname(sc->sc_dev),
733 1.1 jmcneill alc_dma_burst[sc->alc_dma_wr_burst]);
734 1.1 jmcneill }
735 1.1 jmcneill /* Clear data link and flow-control protocol error. */
736 1.1 jmcneill val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
737 1.1 jmcneill val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
738 1.1 jmcneill CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
739 1.2 jmcneill CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
740 1.2 jmcneill CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
741 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
742 1.2 jmcneill CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
743 1.2 jmcneill PCIE_PHYMISC_FORCE_RCV_DET);
744 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
745 1.2 jmcneill sc->alc_rev == ATHEROS_AR8152_B_V10) {
746 1.2 jmcneill val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
747 1.2 jmcneill val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
748 1.2 jmcneill PCIE_PHYMISC2_SERDES_TH_MASK);
749 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
750 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
751 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
752 1.2 jmcneill }
753 1.1 jmcneill /* Disable ASPM L0S and L1. */
754 1.1 jmcneill cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
755 1.1 jmcneill base + PCI_PCIE_LCAP) >> 16;
756 1.1 jmcneill if ((cap & 0x00000c00) != 0) {
757 1.1 jmcneill ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
758 1.1 jmcneill base + PCI_PCIE_LCSR) >> 16;
759 1.1 jmcneill if ((ctl & 0x08) != 0)
760 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_128;
761 1.1 jmcneill if (alcdebug)
762 1.1 jmcneill printf("%s: RCB %u bytes\n",
763 1.1 jmcneill device_xname(sc->sc_dev),
764 1.1 jmcneill sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
765 1.1 jmcneill state = ctl & 0x03;
766 1.2 jmcneill if (state & 0x01)
767 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L0S;
768 1.2 jmcneill if (state & 0x02)
769 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L1S;
770 1.1 jmcneill if (alcdebug)
771 1.1 jmcneill printf("%s: ASPM %s %s\n",
772 1.1 jmcneill device_xname(sc->sc_dev),
773 1.1 jmcneill aspm_state[state],
774 1.1 jmcneill state == 0 ? "disabled" : "enabled");
775 1.2 jmcneill alc_disable_l0s_l1(sc);
776 1.2 jmcneill } else {
777 1.2 jmcneill aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
778 1.1 jmcneill }
779 1.1 jmcneill }
780 1.1 jmcneill
781 1.1 jmcneill /* Reset PHY. */
782 1.1 jmcneill alc_phy_reset(sc);
783 1.1 jmcneill
784 1.1 jmcneill /* Reset the ethernet controller. */
785 1.1 jmcneill alc_reset(sc);
786 1.1 jmcneill
787 1.1 jmcneill /*
788 1.1 jmcneill * One odd thing is AR8132 uses the same PHY hardware(F1
789 1.1 jmcneill * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports
790 1.1 jmcneill * the PHY supports 1000Mbps but that's not true. The PHY
791 1.1 jmcneill * used in AR8132 can't establish gigabit link even if it
792 1.1 jmcneill * shows the same PHY model/revision number of AR8131.
793 1.1 jmcneill */
794 1.2 jmcneill switch (sc->alc_ident->deviceid) {
795 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
796 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
797 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
798 1.2 jmcneill /* FALLTHROUGH */
799 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
800 1.2 jmcneill sc->alc_flags |= ALC_FLAG_FASTETHER;
801 1.1 jmcneill break;
802 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
803 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
804 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
805 1.2 jmcneill /* FALLTHROUGH */
806 1.1 jmcneill default:
807 1.1 jmcneill break;
808 1.1 jmcneill }
809 1.2 jmcneill sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
810 1.1 jmcneill
811 1.1 jmcneill /*
812 1.2 jmcneill * It seems that AR813x/AR815x has silicon bug for SMB. In
813 1.1 jmcneill * addition, Atheros said that enabling SMB wouldn't improve
814 1.1 jmcneill * performance. However I think it's bad to access lots of
815 1.1 jmcneill * registers to extract MAC statistics.
816 1.1 jmcneill */
817 1.1 jmcneill sc->alc_flags |= ALC_FLAG_SMB_BUG;
818 1.1 jmcneill /*
819 1.1 jmcneill * Don't use Tx CMB. It is known to have silicon bug.
820 1.1 jmcneill */
821 1.1 jmcneill sc->alc_flags |= ALC_FLAG_CMB_BUG;
822 1.1 jmcneill sc->alc_rev = PCI_REVISION(pa->pa_class);
823 1.1 jmcneill sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
824 1.1 jmcneill MASTER_CHIP_REV_SHIFT;
825 1.1 jmcneill if (alcdebug) {
826 1.1 jmcneill printf("%s: PCI device revision : 0x%04x\n",
827 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_rev);
828 1.1 jmcneill printf("%s: Chip id/revision : 0x%04x\n",
829 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_chip_rev);
830 1.1 jmcneill printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
831 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
832 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
833 1.1 jmcneill }
834 1.1 jmcneill
835 1.1 jmcneill error = alc_dma_alloc(sc);
836 1.1 jmcneill if (error)
837 1.1 jmcneill goto fail;
838 1.1 jmcneill
839 1.1 jmcneill callout_init(&sc->sc_tick_ch, 0);
840 1.1 jmcneill callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);
841 1.1 jmcneill
842 1.1 jmcneill /* Load station address. */
843 1.1 jmcneill alc_get_macaddr(sc);
844 1.1 jmcneill
845 1.1 jmcneill aprint_normal_dev(self, "Ethernet address %s\n",
846 1.1 jmcneill ether_sprintf(sc->alc_eaddr));
847 1.1 jmcneill
848 1.1 jmcneill ifp = &sc->sc_ec.ec_if;
849 1.1 jmcneill ifp->if_softc = sc;
850 1.1 jmcneill ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
851 1.1 jmcneill ifp->if_init = alc_init;
852 1.1 jmcneill ifp->if_ioctl = alc_ioctl;
853 1.1 jmcneill ifp->if_start = alc_start;
854 1.1 jmcneill ifp->if_stop = alc_stop;
855 1.1 jmcneill ifp->if_watchdog = alc_watchdog;
856 1.1 jmcneill ifp->if_baudrate = IF_Gbps(1);
857 1.1 jmcneill IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
858 1.1 jmcneill IFQ_SET_READY(&ifp->if_snd);
859 1.1 jmcneill strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
860 1.1 jmcneill
861 1.1 jmcneill sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
862 1.1 jmcneill
863 1.1 jmcneill #ifdef ALC_CHECKSUM
864 1.1 jmcneill ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
865 1.1 jmcneill IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
866 1.1 jmcneill IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_TCPv4_Rx;
867 1.1 jmcneill #endif
868 1.1 jmcneill
869 1.1 jmcneill #if NVLAN > 0
870 1.1 jmcneill sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
871 1.1 jmcneill #endif
872 1.1 jmcneill
873 1.1 jmcneill /* Set up MII bus. */
874 1.1 jmcneill sc->sc_miibus.mii_ifp = ifp;
875 1.1 jmcneill sc->sc_miibus.mii_readreg = alc_miibus_readreg;
876 1.1 jmcneill sc->sc_miibus.mii_writereg = alc_miibus_writereg;
877 1.1 jmcneill sc->sc_miibus.mii_statchg = alc_miibus_statchg;
878 1.1 jmcneill
879 1.1 jmcneill sc->sc_ec.ec_mii = &sc->sc_miibus;
880 1.1 jmcneill ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
881 1.1 jmcneill alc_mediastatus);
882 1.1 jmcneill mii_flags = 0;
883 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
884 1.1 jmcneill mii_flags |= MIIF_DOPAUSE;
885 1.1 jmcneill mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
886 1.1 jmcneill MII_OFFSET_ANY, mii_flags);
887 1.1 jmcneill
888 1.1 jmcneill if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
889 1.1 jmcneill printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
890 1.1 jmcneill ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
891 1.1 jmcneill 0, NULL);
892 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
893 1.1 jmcneill } else
894 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
895 1.1 jmcneill
896 1.1 jmcneill if_attach(ifp);
897 1.1 jmcneill ether_ifattach(ifp, sc->alc_eaddr);
898 1.1 jmcneill
899 1.1 jmcneill if (!pmf_device_register(self, NULL, NULL))
900 1.1 jmcneill aprint_error_dev(self, "couldn't establish power handler\n");
901 1.1 jmcneill else
902 1.1 jmcneill pmf_class_network_register(self, ifp);
903 1.1 jmcneill
904 1.1 jmcneill return;
905 1.1 jmcneill fail:
906 1.1 jmcneill alc_dma_free(sc);
907 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
908 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
909 1.1 jmcneill sc->sc_irq_handle = NULL;
910 1.1 jmcneill }
911 1.1 jmcneill if (sc->sc_mem_size) {
912 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
913 1.1 jmcneill sc->sc_mem_size = 0;
914 1.1 jmcneill }
915 1.1 jmcneill }
916 1.1 jmcneill
917 1.1 jmcneill static int
918 1.1 jmcneill alc_detach(device_t self, int flags)
919 1.1 jmcneill {
920 1.1 jmcneill struct alc_softc *sc = device_private(self);
921 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
922 1.1 jmcneill int s;
923 1.1 jmcneill
924 1.1 jmcneill s = splnet();
925 1.1 jmcneill alc_stop(ifp, 0);
926 1.1 jmcneill splx(s);
927 1.1 jmcneill
928 1.1 jmcneill mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
929 1.1 jmcneill
930 1.1 jmcneill /* Delete all remaining media. */
931 1.1 jmcneill ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
932 1.1 jmcneill
933 1.1 jmcneill ether_ifdetach(ifp);
934 1.1 jmcneill if_detach(ifp);
935 1.1 jmcneill alc_dma_free(sc);
936 1.1 jmcneill
937 1.1 jmcneill alc_phy_down(sc);
938 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
939 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
940 1.1 jmcneill sc->sc_irq_handle = NULL;
941 1.1 jmcneill }
942 1.1 jmcneill if (sc->sc_mem_size) {
943 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
944 1.1 jmcneill sc->sc_mem_size = 0;
945 1.1 jmcneill }
946 1.1 jmcneill
947 1.1 jmcneill return (0);
948 1.1 jmcneill }
949 1.1 jmcneill
950 1.1 jmcneill static int
951 1.1 jmcneill alc_dma_alloc(struct alc_softc *sc)
952 1.1 jmcneill {
953 1.1 jmcneill struct alc_txdesc *txd;
954 1.1 jmcneill struct alc_rxdesc *rxd;
955 1.1 jmcneill int nsegs, error, i;
956 1.1 jmcneill
957 1.1 jmcneill /*
958 1.1 jmcneill * Create DMA stuffs for TX ring
959 1.1 jmcneill */
960 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
961 1.1 jmcneill ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
962 1.1 jmcneill if (error) {
963 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
964 1.1 jmcneill return (ENOBUFS);
965 1.1 jmcneill }
966 1.1 jmcneill
967 1.1 jmcneill /* Allocate DMA'able memory for TX ring */
968 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
969 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
970 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
971 1.1 jmcneill if (error) {
972 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Tx ring.\n",
973 1.1 jmcneill device_xname(sc->sc_dev));
974 1.1 jmcneill return error;
975 1.1 jmcneill }
976 1.1 jmcneill
977 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
978 1.1 jmcneill nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
979 1.1 jmcneill BUS_DMA_NOWAIT);
980 1.1 jmcneill if (error)
981 1.1 jmcneill return (ENOBUFS);
982 1.1 jmcneill
983 1.1 jmcneill /* Load the DMA map for Tx ring. */
984 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
985 1.1 jmcneill sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
986 1.1 jmcneill if (error) {
987 1.1 jmcneill printf("%s: could not load DMA'able memory for Tx ring.\n",
988 1.1 jmcneill device_xname(sc->sc_dev));
989 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
990 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
991 1.1 jmcneill return error;
992 1.1 jmcneill }
993 1.1 jmcneill
994 1.1 jmcneill sc->alc_rdata.alc_tx_ring_paddr =
995 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
996 1.1 jmcneill
997 1.1 jmcneill /*
998 1.1 jmcneill * Create DMA stuffs for RX ring
999 1.1 jmcneill */
1000 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
1001 1.1 jmcneill ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1002 1.1 jmcneill if (error)
1003 1.1 jmcneill return (ENOBUFS);
1004 1.1 jmcneill
1005 1.1 jmcneill /* Allocate DMA'able memory for RX ring */
1006 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1007 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1008 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1009 1.1 jmcneill if (error) {
1010 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1011 1.1 jmcneill device_xname(sc->sc_dev));
1012 1.1 jmcneill return error;
1013 1.1 jmcneill }
1014 1.1 jmcneill
1015 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1016 1.1 jmcneill nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
1017 1.1 jmcneill BUS_DMA_NOWAIT);
1018 1.1 jmcneill if (error)
1019 1.1 jmcneill return (ENOBUFS);
1020 1.1 jmcneill
1021 1.1 jmcneill /* Load the DMA map for Rx ring. */
1022 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1023 1.1 jmcneill sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1024 1.1 jmcneill if (error) {
1025 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx ring.\n",
1026 1.1 jmcneill device_xname(sc->sc_dev));
1027 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1028 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1029 1.1 jmcneill return error;
1030 1.1 jmcneill }
1031 1.1 jmcneill
1032 1.1 jmcneill sc->alc_rdata.alc_rx_ring_paddr =
1033 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1034 1.1 jmcneill
1035 1.1 jmcneill /*
1036 1.1 jmcneill * Create DMA stuffs for RX return ring
1037 1.1 jmcneill */
1038 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1039 1.1 jmcneill ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1040 1.1 jmcneill if (error)
1041 1.1 jmcneill return (ENOBUFS);
1042 1.1 jmcneill
1043 1.1 jmcneill /* Allocate DMA'able memory for RX return ring */
1044 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1045 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1046 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1047 1.1 jmcneill if (error) {
1048 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx "
1049 1.1 jmcneill "return ring.\n", device_xname(sc->sc_dev));
1050 1.1 jmcneill return error;
1051 1.1 jmcneill }
1052 1.1 jmcneill
1053 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1054 1.1 jmcneill nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
1055 1.1 jmcneill BUS_DMA_NOWAIT);
1056 1.1 jmcneill if (error)
1057 1.1 jmcneill return (ENOBUFS);
1058 1.1 jmcneill
1059 1.1 jmcneill /* Load the DMA map for Rx return ring. */
1060 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1061 1.1 jmcneill sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1062 1.1 jmcneill if (error) {
1063 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx return ring."
1064 1.1 jmcneill "\n", device_xname(sc->sc_dev));
1065 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1066 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1067 1.1 jmcneill return error;
1068 1.1 jmcneill }
1069 1.1 jmcneill
1070 1.1 jmcneill sc->alc_rdata.alc_rr_ring_paddr =
1071 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1072 1.1 jmcneill
1073 1.1 jmcneill /*
1074 1.1 jmcneill * Create DMA stuffs for CMB block
1075 1.1 jmcneill */
1076 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1077 1.1 jmcneill ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1078 1.1 jmcneill &sc->alc_cdata.alc_cmb_map);
1079 1.1 jmcneill if (error)
1080 1.1 jmcneill return (ENOBUFS);
1081 1.1 jmcneill
1082 1.1 jmcneill /* Allocate DMA'able memory for CMB block */
1083 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1084 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1085 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1086 1.1 jmcneill if (error) {
1087 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1088 1.1 jmcneill "CMB block\n", device_xname(sc->sc_dev));
1089 1.1 jmcneill return error;
1090 1.1 jmcneill }
1091 1.1 jmcneill
1092 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1093 1.1 jmcneill nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
1094 1.1 jmcneill BUS_DMA_NOWAIT);
1095 1.1 jmcneill if (error)
1096 1.1 jmcneill return (ENOBUFS);
1097 1.1 jmcneill
1098 1.1 jmcneill /* Load the DMA map for CMB block. */
1099 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1100 1.1 jmcneill sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1101 1.1 jmcneill BUS_DMA_WAITOK);
1102 1.1 jmcneill if (error) {
1103 1.1 jmcneill printf("%s: could not load DMA'able memory for CMB block\n",
1104 1.1 jmcneill device_xname(sc->sc_dev));
1105 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1106 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1107 1.1 jmcneill return error;
1108 1.1 jmcneill }
1109 1.1 jmcneill
1110 1.1 jmcneill sc->alc_rdata.alc_cmb_paddr =
1111 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1112 1.1 jmcneill
1113 1.1 jmcneill /*
1114 1.1 jmcneill * Create DMA stuffs for SMB block
1115 1.1 jmcneill */
1116 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1117 1.1 jmcneill ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1118 1.1 jmcneill &sc->alc_cdata.alc_smb_map);
1119 1.1 jmcneill if (error)
1120 1.1 jmcneill return (ENOBUFS);
1121 1.1 jmcneill
1122 1.1 jmcneill /* Allocate DMA'able memory for SMB block */
1123 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1124 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1125 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1126 1.1 jmcneill if (error) {
1127 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1128 1.1 jmcneill "SMB block\n", device_xname(sc->sc_dev));
1129 1.1 jmcneill return error;
1130 1.1 jmcneill }
1131 1.1 jmcneill
1132 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1133 1.1 jmcneill nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
1134 1.1 jmcneill BUS_DMA_NOWAIT);
1135 1.1 jmcneill if (error)
1136 1.1 jmcneill return (ENOBUFS);
1137 1.1 jmcneill
1138 1.1 jmcneill /* Load the DMA map for SMB block */
1139 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1140 1.1 jmcneill sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1141 1.1 jmcneill BUS_DMA_WAITOK);
1142 1.1 jmcneill if (error) {
1143 1.1 jmcneill printf("%s: could not load DMA'able memory for SMB block\n",
1144 1.1 jmcneill device_xname(sc->sc_dev));
1145 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1146 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1147 1.1 jmcneill return error;
1148 1.1 jmcneill }
1149 1.1 jmcneill
1150 1.1 jmcneill sc->alc_rdata.alc_smb_paddr =
1151 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1152 1.1 jmcneill
1153 1.1 jmcneill
1154 1.1 jmcneill /* Create DMA maps for Tx buffers. */
1155 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1156 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1157 1.1 jmcneill txd->tx_m = NULL;
1158 1.1 jmcneill txd->tx_dmamap = NULL;
1159 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1160 1.1 jmcneill ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1161 1.1 jmcneill &txd->tx_dmamap);
1162 1.1 jmcneill if (error) {
1163 1.1 jmcneill printf("%s: could not create Tx dmamap.\n",
1164 1.1 jmcneill device_xname(sc->sc_dev));
1165 1.1 jmcneill return error;
1166 1.1 jmcneill }
1167 1.1 jmcneill }
1168 1.1 jmcneill
1169 1.1 jmcneill /* Create DMA maps for Rx buffers. */
1170 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1171 1.1 jmcneill BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1172 1.1 jmcneill if (error) {
1173 1.1 jmcneill printf("%s: could not create spare Rx dmamap.\n",
1174 1.1 jmcneill device_xname(sc->sc_dev));
1175 1.1 jmcneill return error;
1176 1.1 jmcneill }
1177 1.1 jmcneill
1178 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1179 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1180 1.1 jmcneill rxd->rx_m = NULL;
1181 1.1 jmcneill rxd->rx_dmamap = NULL;
1182 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1183 1.1 jmcneill MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1184 1.1 jmcneill if (error) {
1185 1.1 jmcneill printf("%s: could not create Rx dmamap.\n",
1186 1.1 jmcneill device_xname(sc->sc_dev));
1187 1.1 jmcneill return error;
1188 1.1 jmcneill }
1189 1.1 jmcneill }
1190 1.1 jmcneill
1191 1.1 jmcneill return (0);
1192 1.1 jmcneill }
1193 1.1 jmcneill
1194 1.1 jmcneill
1195 1.1 jmcneill static void
1196 1.1 jmcneill alc_dma_free(struct alc_softc *sc)
1197 1.1 jmcneill {
1198 1.1 jmcneill struct alc_txdesc *txd;
1199 1.1 jmcneill struct alc_rxdesc *rxd;
1200 1.1 jmcneill int i;
1201 1.1 jmcneill
1202 1.1 jmcneill /* Tx buffers */
1203 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1204 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1205 1.1 jmcneill if (txd->tx_dmamap != NULL) {
1206 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1207 1.1 jmcneill txd->tx_dmamap = NULL;
1208 1.1 jmcneill }
1209 1.1 jmcneill }
1210 1.1 jmcneill /* Rx buffers */
1211 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1212 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1213 1.1 jmcneill if (rxd->rx_dmamap != NULL) {
1214 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1215 1.1 jmcneill rxd->rx_dmamap = NULL;
1216 1.1 jmcneill }
1217 1.1 jmcneill }
1218 1.1 jmcneill if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1219 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1220 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = NULL;
1221 1.1 jmcneill }
1222 1.1 jmcneill
1223 1.1 jmcneill /* Tx ring. */
1224 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL)
1225 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1226 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1227 1.1 jmcneill sc->alc_rdata.alc_tx_ring != NULL)
1228 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1229 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
1230 1.1 jmcneill sc->alc_rdata.alc_tx_ring = NULL;
1231 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
1232 1.1 jmcneill
1233 1.1 jmcneill /* Rx ring. */
1234 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL)
1235 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1236 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1237 1.1 jmcneill sc->alc_rdata.alc_rx_ring != NULL)
1238 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1239 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1240 1.1 jmcneill sc->alc_rdata.alc_rx_ring = NULL;
1241 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map = NULL;
1242 1.1 jmcneill
1243 1.1 jmcneill /* Rx return ring. */
1244 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL)
1245 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1246 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1247 1.1 jmcneill sc->alc_rdata.alc_rr_ring != NULL)
1248 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1249 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1250 1.1 jmcneill sc->alc_rdata.alc_rr_ring = NULL;
1251 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map = NULL;
1252 1.1 jmcneill
1253 1.1 jmcneill /* CMB block */
1254 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL)
1255 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1256 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL &&
1257 1.1 jmcneill sc->alc_rdata.alc_cmb != NULL)
1258 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1259 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1260 1.1 jmcneill sc->alc_rdata.alc_cmb = NULL;
1261 1.1 jmcneill sc->alc_cdata.alc_cmb_map = NULL;
1262 1.1 jmcneill
1263 1.1 jmcneill /* SMB block */
1264 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL)
1265 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1266 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL &&
1267 1.1 jmcneill sc->alc_rdata.alc_smb != NULL)
1268 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1269 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1270 1.1 jmcneill sc->alc_rdata.alc_smb = NULL;
1271 1.1 jmcneill sc->alc_cdata.alc_smb_map = NULL;
1272 1.1 jmcneill }
1273 1.1 jmcneill
/*
 * Encapsulate the mbuf chain *m_head into the Tx descriptor ring.
 *
 * Returns 0 on success, in which case the chain (possibly replaced
 * by m_pullup()) is owned by the ring until reclaimed at Tx
 * completion.  On failure an errno is returned; the caller can
 * distinguish the cases by inspecting *m_head: it is NULL when the
 * chain was freed here (defrag/load failure, zero segments), and
 * left intact when the ring simply has no room (ENOBUFS) so the
 * caller may retry or drop it.
 */
static int
alc_encap(struct alc_softc *sc, struct mbuf **m_head)
{
	struct alc_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, idx, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	/*
	 * NOTE(review): poff is never updated after this, so the custom
	 * checksum start offset below is always 0 — confirm this is the
	 * intended behavior for ALC_CSUM_FEATURES.
	 */
	poff = 0;

	/* Use the dmamap of the first descriptor we will occupy. */
	prod = sc->alc_cdata.alc_tx_prod;
	txd = &sc->alc_cdata.alc_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return (error);
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
		/* Ring full: unload but keep *m_head for the caller. */
		bus_dmamap_unload(sc->sc_dmat, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* *m_head may have been replaced by m_pullup() above. */
	m = *m_head;
	desc = NULL;
	idx = 0;
#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = htons(VLAN_TAG_VALUE(mtag));
		vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
		cflags |= TD_INS_VLAN_TAG;
	}
#endif
	/* Configure Tx checksum offload. */
	if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
		cflags |= TD_CUSTOM_CSUM;
		/* Set checksum start offset. */
		cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
		    TD_PLOAD_OFFSET_MASK;
	}
	/* One descriptor per DMA segment; same flags/vtag on each. */
	for (; idx < nsegs; idx++) {
		desc = &sc->alc_rdata.alc_tx_ring[prod];
		desc->len =
		    htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
		desc->flags = htole32(cflags);
		desc->addr = htole64(map->dm_segs[idx].ds_addr);
		sc->alc_cdata.alc_tx_cnt++;
		ALC_DESC_INC(prod, ALC_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->alc_cdata.alc_tx_prod = prod;

	/* Finally set EOP on the last descriptor. */
	prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
	desc = &sc->alc_rdata.alc_tx_ring[prod];
	desc->flags |= htole32(TD_EOP);

	/*
	 * Swap dmamap of the first and the last descriptor so the
	 * loaded map is found (and unloaded) when the LAST descriptor
	 * of this frame completes, while the mbuf hangs off that same
	 * slot for reclamation.
	 */
	txd = &sc->alc_cdata.alc_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
1383 1.1 jmcneill
/*
 * alc_start: if_start handler.  Drain the interface send queue into
 * the Tx descriptor ring and kick the chip's Tx producer mailbox.
 * Called at splnet.
 */
static void
alc_start(struct ifnet *ifp)
{
	struct alc_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	/* Do nothing unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Reclaim transmitted frames. */
	if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
		alc_txeof(sc);

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (alc_encap(sc, &m_head)) {
			/* m_head == NULL: alc_encap consumed/freed it. */
			if (m_head == NULL)
				break;
			/*
			 * NOTE(review): on failure with m_head != NULL the
			 * dequeued mbuf is neither freed nor re-queued here;
			 * presumably alc_encap retains no reference, so this
			 * looks like a potential leak — verify against
			 * alc_encap before changing.
			 */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		bpf_mtap(ifp, m_head);
	}

	if (enq) {
		/* Sync descriptors so the chip sees what we wrote. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
		    sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/* Kick. Assume we're using normal Tx priority queue. */
		CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
		    (sc->alc_cdata.alc_tx_prod <<
		    MBOX_TD_PROD_LO_IDX_SHIFT) &
		    MBOX_TD_PROD_LO_IDX_MASK);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = ALC_TX_TIMEOUT;
	}
}
1438 1.1 jmcneill
1439 1.1 jmcneill static void
1440 1.1 jmcneill alc_watchdog(struct ifnet *ifp)
1441 1.1 jmcneill {
1442 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1443 1.1 jmcneill
1444 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1445 1.1 jmcneill printf("%s: watchdog timeout (missed link)\n",
1446 1.1 jmcneill device_xname(sc->sc_dev));
1447 1.1 jmcneill ifp->if_oerrors++;
1448 1.1 jmcneill alc_init(ifp);
1449 1.1 jmcneill return;
1450 1.1 jmcneill }
1451 1.1 jmcneill
1452 1.1 jmcneill printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1453 1.1 jmcneill ifp->if_oerrors++;
1454 1.1 jmcneill alc_init(ifp);
1455 1.1 jmcneill
1456 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1457 1.1 jmcneill alc_start(ifp);
1458 1.1 jmcneill }
1459 1.1 jmcneill
1460 1.1 jmcneill static int
1461 1.1 jmcneill alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1462 1.1 jmcneill {
1463 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1464 1.1 jmcneill int s, error = 0;
1465 1.1 jmcneill
1466 1.1 jmcneill s = splnet();
1467 1.1 jmcneill
1468 1.1 jmcneill error = ether_ioctl(ifp, cmd, data);
1469 1.1 jmcneill if (error == ENETRESET) {
1470 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING)
1471 1.1 jmcneill alc_iff(sc);
1472 1.1 jmcneill error = 0;
1473 1.1 jmcneill }
1474 1.1 jmcneill
1475 1.1 jmcneill splx(s);
1476 1.1 jmcneill return (error);
1477 1.1 jmcneill }
1478 1.1 jmcneill
1479 1.1 jmcneill static void
1480 1.1 jmcneill alc_mac_config(struct alc_softc *sc)
1481 1.1 jmcneill {
1482 1.1 jmcneill struct mii_data *mii;
1483 1.1 jmcneill uint32_t reg;
1484 1.1 jmcneill
1485 1.1 jmcneill mii = &sc->sc_miibus;
1486 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
1487 1.1 jmcneill reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
1488 1.1 jmcneill MAC_CFG_SPEED_MASK);
1489 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
1490 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
1491 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
1492 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
1493 1.1 jmcneill /* Reprogram MAC with resolved speed/duplex. */
1494 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
1495 1.1 jmcneill case IFM_10_T:
1496 1.1 jmcneill case IFM_100_TX:
1497 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
1498 1.1 jmcneill break;
1499 1.1 jmcneill case IFM_1000_T:
1500 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
1501 1.1 jmcneill break;
1502 1.1 jmcneill }
1503 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1504 1.1 jmcneill reg |= MAC_CFG_FULL_DUPLEX;
1505 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1506 1.1 jmcneill reg |= MAC_CFG_TX_FC;
1507 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1508 1.1 jmcneill reg |= MAC_CFG_RX_FC;
1509 1.1 jmcneill }
1510 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1511 1.1 jmcneill }
1512 1.1 jmcneill
1513 1.1 jmcneill static void
1514 1.1 jmcneill alc_stats_clear(struct alc_softc *sc)
1515 1.1 jmcneill {
1516 1.1 jmcneill struct smb sb, *smb;
1517 1.1 jmcneill uint32_t *reg;
1518 1.1 jmcneill int i;
1519 1.1 jmcneill
1520 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1521 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1522 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1523 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1524 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1525 1.1 jmcneill /* Update done, clear. */
1526 1.1 jmcneill smb->updated = 0;
1527 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1528 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1529 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1530 1.1 jmcneill } else {
1531 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1532 1.1 jmcneill reg++) {
1533 1.1 jmcneill CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1534 1.1 jmcneill i += sizeof(uint32_t);
1535 1.1 jmcneill }
1536 1.1 jmcneill /* Read Tx statistics. */
1537 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1538 1.1 jmcneill reg++) {
1539 1.1 jmcneill CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1540 1.1 jmcneill i += sizeof(uint32_t);
1541 1.1 jmcneill }
1542 1.1 jmcneill }
1543 1.1 jmcneill }
1544 1.1 jmcneill
/*
 * alc_stats_update: fold the hardware statistics into the driver's
 * accumulated counters and the ifnet counters.  Stats come either
 * from the SMB DMA page (normal chips) or, on chips with the SMB
 * bug, from reading the MIB registers directly (reads clear them).
 */
static void
alc_stats_update(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct alc_hw_stats *stat;
	struct smb sb, *smb;
	uint32_t *reg;
	int i;

	stat = &sc->alc_stats;
	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		smb = sc->alc_rdata.alc_smb;
		/* Hardware has not posted a new snapshot yet. */
		if (smb->updated == 0)
			return;
	} else {
		smb = &sb;
		/* Read Rx statistics; struct smb mirrors register layout. */
		for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
		/* Read Tx statistics. */
		for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
		    reg++) {
			*reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
			i += sizeof(uint32_t);
		}
	}

	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_rrs_errs += smb->rx_rrs_errs;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_abort += smb->tx_abort;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	/*
	 * Weighted estimate: multi-collision frames count at least 2,
	 * aborted frames the full retry limit.
	 */
	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls * 2 + smb->tx_late_colls +
	    smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;

	/*
	 * XXX
	 * tx_pkts_truncated counter looks suspicious. It constantly
	 * increments with no sign of Tx errors. This may indicate
	 * the counter name is not correct one so I've removed the
	 * counter in output errors.
	 */
	ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
	    smb->tx_underrun;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_rrs_errs +
	    smb->rx_alignerrs;

	if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
		/* Update done, clear. */
		smb->updated = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
		    sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
}
1663 1.1 jmcneill
/*
 * alc_intr: interrupt handler.  Returns 0 if the interrupt was not
 * ours, 1 if handled.  Interrupts are disabled on entry (by writing
 * the status back with INTR_DIS_INT) and re-enabled on the normal
 * exit path; error paths leave them disabled and reinitialize.
 */
static int
alc_intr(void *arg)
{
	struct alc_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t status;

	status = CSR_READ_4(sc, ALC_INTR_STATUS);
	/* Not our interrupt. */
	if ((status & ALC_INTRS) == 0)
		return (0);

	/* Acknowledge and disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_RX_PKT) {
			int error;

			error = alc_rxintr(sc);
			if (error) {
				/* Rx ring corrupt; reset the chip. */
				alc_init(ifp);
				return (0);
			}
		}

		/* DMA/TxQ timeouts require a full reinitialization. */
		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
		    INTR_TXQ_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_TXQ_TO_RST)
				printf("%s: TxQ reset! -- resetting\n",
				    device_xname(sc->sc_dev));
			alc_init(ifp);
			return (0);
		}

		/* Reclaim completed Tx descriptors and restart output. */
		alc_txeof(sc);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			alc_start(ifp);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
	return (1);
}
1713 1.1 jmcneill
1714 1.1 jmcneill static void
1715 1.1 jmcneill alc_txeof(struct alc_softc *sc)
1716 1.1 jmcneill {
1717 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1718 1.1 jmcneill struct alc_txdesc *txd;
1719 1.1 jmcneill uint32_t cons, prod;
1720 1.1 jmcneill int prog;
1721 1.1 jmcneill
1722 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1723 1.1 jmcneill return;
1724 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1725 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1726 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1727 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
1728 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1729 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize,
1730 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1731 1.1 jmcneill prod = sc->alc_rdata.alc_cmb->cons;
1732 1.1 jmcneill } else
1733 1.1 jmcneill prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
1734 1.1 jmcneill /* Assume we're using normal Tx priority queue. */
1735 1.1 jmcneill prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
1736 1.1 jmcneill MBOX_TD_CONS_LO_IDX_SHIFT;
1737 1.1 jmcneill cons = sc->alc_cdata.alc_tx_cons;
1738 1.1 jmcneill /*
1739 1.1 jmcneill * Go through our Tx list and free mbufs for those
1740 1.1 jmcneill * frames which have been transmitted.
1741 1.1 jmcneill */
1742 1.1 jmcneill for (prog = 0; cons != prod; prog++,
1743 1.1 jmcneill ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
1744 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt <= 0)
1745 1.1 jmcneill break;
1746 1.1 jmcneill prog++;
1747 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
1748 1.1 jmcneill sc->alc_cdata.alc_tx_cnt--;
1749 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[cons];
1750 1.1 jmcneill if (txd->tx_m != NULL) {
1751 1.1 jmcneill /* Reclaim transmitted mbufs. */
1752 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1753 1.1 jmcneill m_freem(txd->tx_m);
1754 1.1 jmcneill txd->tx_m = NULL;
1755 1.1 jmcneill }
1756 1.1 jmcneill }
1757 1.1 jmcneill
1758 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
1759 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1760 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1761 1.1 jmcneill sc->alc_cdata.alc_tx_cons = cons;
1762 1.1 jmcneill /*
1763 1.1 jmcneill * Unarm watchdog timer only when there is no pending
1764 1.1 jmcneill * frames in Tx queue.
1765 1.1 jmcneill */
1766 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1767 1.1 jmcneill ifp->if_timer = 0;
1768 1.1 jmcneill }
1769 1.1 jmcneill
1770 1.1 jmcneill static int
1771 1.1 jmcneill alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, int init)
1772 1.1 jmcneill {
1773 1.1 jmcneill struct mbuf *m;
1774 1.1 jmcneill bus_dmamap_t map;
1775 1.1 jmcneill int error;
1776 1.1 jmcneill
1777 1.1 jmcneill MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
1778 1.1 jmcneill if (m == NULL)
1779 1.1 jmcneill return (ENOBUFS);
1780 1.1 jmcneill MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
1781 1.1 jmcneill if (!(m->m_flags & M_EXT)) {
1782 1.1 jmcneill m_freem(m);
1783 1.1 jmcneill return (ENOBUFS);
1784 1.1 jmcneill }
1785 1.1 jmcneill
1786 1.1 jmcneill m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
1787 1.1 jmcneill
1788 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat,
1789 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
1790 1.1 jmcneill
1791 1.1 jmcneill if (error != 0) {
1792 1.1 jmcneill if (!error) {
1793 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat,
1794 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap);
1795 1.1 jmcneill error = EFBIG;
1796 1.1 jmcneill printf("%s: too many segments?!\n",
1797 1.1 jmcneill device_xname(sc->sc_dev));
1798 1.1 jmcneill }
1799 1.1 jmcneill m_freem(m);
1800 1.1 jmcneill
1801 1.1 jmcneill if (init)
1802 1.1 jmcneill printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
1803 1.1 jmcneill
1804 1.1 jmcneill return (error);
1805 1.1 jmcneill }
1806 1.1 jmcneill
1807 1.1 jmcneill if (rxd->rx_m != NULL) {
1808 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1809 1.1 jmcneill rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1810 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1811 1.1 jmcneill }
1812 1.1 jmcneill map = rxd->rx_dmamap;
1813 1.1 jmcneill rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
1814 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = map;
1815 1.1 jmcneill rxd->rx_m = m;
1816 1.1 jmcneill rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
1817 1.1 jmcneill return (0);
1818 1.1 jmcneill }
1819 1.1 jmcneill
/*
 * alc_rxintr: process the Rx return ring.  Each valid return
 * descriptor describes one received frame (possibly spanning several
 * Rx buffers); alc_rxeof() hands it to the stack.  Returns 0 on
 * success, EIO if the ring looks corrupt (caller resets the chip).
 */
static int
alc_rxintr(struct alc_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct rx_rdesc *rrd;
	uint32_t nsegs, status;
	int rr_cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
	    sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	rr_cons = sc->alc_cdata.alc_rr_cons;
	for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
		rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
		status = le32toh(rrd->status);
		/* No more completed frames. */
		if ((status & RRD_VALID) == 0)
			break;
		nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
		if (nsegs == 0) {
			/* This should not happen! */
			if (alcdebug)
				printf("%s: unexpected segment count -- "
				    "resetting\n", device_xname(sc->sc_dev));
			return (EIO);
		}
		alc_rxeof(sc, rrd);
		/* Clear Rx return status. */
		rrd->status = 0;
		ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
		/* Advance the buffer consumer by the segments consumed. */
		sc->alc_cdata.alc_rx_cons += nsegs;
		sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
		prog += nsegs;
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->alc_cdata.alc_rr_cons = rr_cons;
		/* Sync Rx return descriptors. */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
		    sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Sync updated Rx descriptors such that controller see
		 * modified buffer addresses.
		 */
		bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
		    sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);
		/*
		 * Let controller know availability of new Rx buffers.
		 * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors
		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
		 * only when Rx buffer pre-fetching is required. In
		 * addition we already set ALC_RX_RD_FREE_THRESH to
		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However
		 * it still seems that pre-fetching needs more
		 * experimentation.
		 */
		CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
		    sc->alc_cdata.alc_rx_cons);
	}

	return (0);
}
1885 1.1 jmcneill
/*
 * alc_rxeof: receive one frame described by a Rx return descriptor.
 * The frame may span several fixed-size Rx buffers; they are chained
 * into one mbuf chain, the trailing CRC (which the chip cannot
 * strip) is trimmed, and the result is passed to ether_input().
 */
static void
alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct alc_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t rdinfo, status;
	int count, nsegs, rx_cons;

	status = le32toh(rrd->status);
	rdinfo = le32toh(rrd->rdinfo);
	/* First Rx buffer index and number of buffers for this frame. */
	rx_cons = RRD_RD_IDX(rdinfo);
	nsegs = RRD_RD_CNT(rdinfo);

	sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
	if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
		/*
		 * We want to pass the following frames to upper
		 * layer regardless of error status of Rx return
		 * ring.
		 *
		 *  o IP/TCP/UDP checksum is bad.
		 *  o frame length and protocol specific length
		 *     does not match.
		 *
		 * Force network stack compute checksum for
		 * errored frames.
		 */
		status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
		/* Hard errors: drop the frame entirely. */
		if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
		    RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
			return;
	}

	for (count = 0; count < nsegs; count++,
	    ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
		rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
		mp = rxd->rx_m;
		/* Add a new receive buffer to the ring. */
		if (alc_newbuf(sc, rxd, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse Rx buffers. */
			if (sc->alc_cdata.alc_rxhead != NULL)
				m_freem(sc->alc_cdata.alc_rxhead);
			break;
		}

		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = sc->alc_buf_size;

		/* Chain received mbufs. */
		if (sc->alc_cdata.alc_rxhead == NULL) {
			sc->alc_cdata.alc_rxhead = mp;
			sc->alc_cdata.alc_rxtail = mp;
		} else {
			/* Only the head mbuf carries the packet header. */
			mp->m_flags &= ~M_PKTHDR;
			sc->alc_cdata.alc_rxprev_tail =
			    sc->alc_cdata.alc_rxtail;
			sc->alc_cdata.alc_rxtail->m_next = mp;
			sc->alc_cdata.alc_rxtail = mp;
		}

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->alc_cdata.alc_rxhead;
			m->m_flags |= M_PKTHDR;
			/*
			 * It seems that L1C/L2C controller has no way
			 * to tell hardware to strip CRC bytes.
			 */
			m->m_pkthdr.len =
			    sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
			if (nsegs > 1) {
				/* Set last mbuf size. */
				mp->m_len = sc->alc_cdata.alc_rxlen -
				    (nsegs - 1) * sc->alc_buf_size;
				/* Remove the CRC bytes in chained mbufs. */
				if (mp->m_len <= ETHER_CRC_LEN) {
					/*
					 * CRC spills into the previous mbuf:
					 * drop this one and trim the rest of
					 * the CRC off the new tail.
					 */
					sc->alc_cdata.alc_rxtail =
					    sc->alc_cdata.alc_rxprev_tail;
					sc->alc_cdata.alc_rxtail->m_len -=
					    (ETHER_CRC_LEN - mp->m_len);
					sc->alc_cdata.alc_rxtail->m_next = NULL;
					m_freem(mp);
				} else {
					mp->m_len -= ETHER_CRC_LEN;
				}
			} else
				m->m_len = m->m_pkthdr.len;
			m->m_pkthdr.rcvif = ifp;
#if NVLAN > 0
			/*
			 * Due to hardware bugs, Rx checksum offloading
			 * was intentionally disabled.
			 */
			if (status & RRD_VLAN_TAG) {
				u_int32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
				VLAN_INPUT_TAG(ifp, m, ntohs(vtag), );
			}
#endif

			bpf_mtap(ifp, m);

			{
			/* Pass it on. */
			ether_input(ifp, m);
			}
		}
	}
	/* Reset mbuf chains. */
	ALC_RXCHAIN_RESET(sc);
}
2003 1.1 jmcneill
2004 1.1 jmcneill static void
2005 1.1 jmcneill alc_tick(void *xsc)
2006 1.1 jmcneill {
2007 1.1 jmcneill struct alc_softc *sc = xsc;
2008 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
2009 1.1 jmcneill int s;
2010 1.1 jmcneill
2011 1.1 jmcneill s = splnet();
2012 1.1 jmcneill mii_tick(mii);
2013 1.1 jmcneill alc_stats_update(sc);
2014 1.1 jmcneill splx(s);
2015 1.1 jmcneill
2016 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2017 1.1 jmcneill }
2018 1.1 jmcneill
2019 1.1 jmcneill static void
2020 1.1 jmcneill alc_reset(struct alc_softc *sc)
2021 1.1 jmcneill {
2022 1.1 jmcneill uint32_t reg;
2023 1.1 jmcneill int i;
2024 1.1 jmcneill
2025 1.2 jmcneill reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2026 1.2 jmcneill reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2027 1.2 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2028 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2029 1.1 jmcneill DELAY(10);
2030 1.1 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2031 1.1 jmcneill break;
2032 1.1 jmcneill }
2033 1.1 jmcneill if (i == 0)
2034 1.1 jmcneill printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
2035 1.1 jmcneill
2036 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2037 1.1 jmcneill if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2038 1.1 jmcneill break;
2039 1.1 jmcneill DELAY(10);
2040 1.1 jmcneill }
2041 1.1 jmcneill
2042 1.1 jmcneill if (i == 0)
2043 1.1 jmcneill printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
2044 1.1 jmcneill reg);
2045 1.1 jmcneill }
2046 1.1 jmcneill
2047 1.1 jmcneill static int
2048 1.1 jmcneill alc_init(struct ifnet *ifp)
2049 1.1 jmcneill {
2050 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
2051 1.1 jmcneill struct mii_data *mii;
2052 1.1 jmcneill uint8_t eaddr[ETHER_ADDR_LEN];
2053 1.1 jmcneill bus_addr_t paddr;
2054 1.1 jmcneill uint32_t reg, rxf_hi, rxf_lo;
2055 1.1 jmcneill int error;
2056 1.1 jmcneill
2057 1.1 jmcneill /*
2058 1.1 jmcneill * Cancel any pending I/O.
2059 1.1 jmcneill */
2060 1.1 jmcneill alc_stop(ifp, 0);
2061 1.1 jmcneill /*
2062 1.1 jmcneill * Reset the chip to a known state.
2063 1.1 jmcneill */
2064 1.1 jmcneill alc_reset(sc);
2065 1.1 jmcneill
2066 1.1 jmcneill /* Initialize Rx descriptors. */
2067 1.1 jmcneill error = alc_init_rx_ring(sc);
2068 1.1 jmcneill if (error != 0) {
2069 1.1 jmcneill printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2070 1.1 jmcneill alc_stop(ifp, 0);
2071 1.1 jmcneill return (error);
2072 1.1 jmcneill }
2073 1.1 jmcneill alc_init_rr_ring(sc);
2074 1.1 jmcneill alc_init_tx_ring(sc);
2075 1.1 jmcneill alc_init_cmb(sc);
2076 1.1 jmcneill alc_init_smb(sc);
2077 1.1 jmcneill
2078 1.2 jmcneill /* Enable all clocks. */
2079 1.2 jmcneill CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2080 1.2 jmcneill
2081 1.1 jmcneill /* Reprogram the station address. */
2082 1.1 jmcneill memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2083 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR0,
2084 1.1 jmcneill eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2085 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2086 1.1 jmcneill /*
2087 1.1 jmcneill * Clear WOL status and disable all WOL feature as WOL
2088 1.1 jmcneill * would interfere Rx operation under normal environments.
2089 1.1 jmcneill */
2090 1.1 jmcneill CSR_READ_4(sc, ALC_WOL_CFG);
2091 1.1 jmcneill CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2092 1.1 jmcneill /* Set Tx descriptor base addresses. */
2093 1.1 jmcneill paddr = sc->alc_rdata.alc_tx_ring_paddr;
2094 1.1 jmcneill CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2095 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2096 1.1 jmcneill /* We don't use high priority ring. */
2097 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2098 1.1 jmcneill /* Set Tx descriptor counter. */
2099 1.1 jmcneill CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2100 1.1 jmcneill (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2101 1.1 jmcneill /* Set Rx descriptor base addresses. */
2102 1.1 jmcneill paddr = sc->alc_rdata.alc_rx_ring_paddr;
2103 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2104 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2105 1.1 jmcneill /* We use one Rx ring. */
2106 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2107 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2108 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2109 1.1 jmcneill /* Set Rx descriptor counter. */
2110 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2111 1.1 jmcneill (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2112 1.1 jmcneill
2113 1.1 jmcneill /*
2114 1.1 jmcneill * Let hardware split jumbo frames into alc_max_buf_sized chunks.
2115 1.1 jmcneill * if it do not fit the buffer size. Rx return descriptor holds
2116 1.1 jmcneill * a counter that indicates how many fragments were made by the
2117 1.1 jmcneill * hardware. The buffer size should be multiple of 8 bytes.
2118 1.1 jmcneill * Since hardware has limit on the size of buffer size, always
2119 1.1 jmcneill * use the maximum value.
2120 1.1 jmcneill * For strict-alignment architectures make sure to reduce buffer
2121 1.1 jmcneill * size by 8 bytes to make room for alignment fixup.
2122 1.1 jmcneill */
2123 1.1 jmcneill sc->alc_buf_size = RX_BUF_SIZE_MAX;
2124 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2125 1.1 jmcneill
2126 1.1 jmcneill paddr = sc->alc_rdata.alc_rr_ring_paddr;
2127 1.1 jmcneill /* Set Rx return descriptor base addresses. */
2128 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2129 1.1 jmcneill /* We use one Rx return ring. */
2130 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2131 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2132 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2133 1.1 jmcneill /* Set Rx return descriptor counter. */
2134 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2135 1.1 jmcneill (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2136 1.1 jmcneill paddr = sc->alc_rdata.alc_cmb_paddr;
2137 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2138 1.1 jmcneill paddr = sc->alc_rdata.alc_smb_paddr;
2139 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2140 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2141 1.1 jmcneill
2142 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2143 1.2 jmcneill /* Reconfigure SRAM - Vendor magic. */
2144 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2145 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2146 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2147 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2148 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2149 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2150 1.2 jmcneill CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2151 1.2 jmcneill CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2152 1.2 jmcneill }
2153 1.2 jmcneill
2154 1.1 jmcneill /* Tell hardware that we're ready to load DMA blocks. */
2155 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2156 1.1 jmcneill
2157 1.1 jmcneill /* Configure interrupt moderation timer. */
2158 1.1 jmcneill sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2159 1.1 jmcneill sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2160 1.1 jmcneill reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2161 1.1 jmcneill reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2162 1.1 jmcneill CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2163 1.1 jmcneill /*
2164 1.1 jmcneill * We don't want to automatic interrupt clear as task queue
2165 1.1 jmcneill * for the interrupt should know interrupt status.
2166 1.1 jmcneill */
2167 1.2 jmcneill reg = MASTER_SA_TIMER_ENB;
2168 1.1 jmcneill if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2169 1.1 jmcneill reg |= MASTER_IM_RX_TIMER_ENB;
2170 1.1 jmcneill if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2171 1.1 jmcneill reg |= MASTER_IM_TX_TIMER_ENB;
2172 1.1 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2173 1.1 jmcneill /*
2174 1.1 jmcneill * Disable interrupt re-trigger timer. We don't want automatic
2175 1.1 jmcneill * re-triggering of un-ACKed interrupts.
2176 1.1 jmcneill */
2177 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2178 1.1 jmcneill /* Configure CMB. */
2179 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2180 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2181 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2182 1.1 jmcneill else
2183 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2184 1.1 jmcneill /*
2185 1.1 jmcneill * Hardware can be configured to issue SMB interrupt based
2186 1.1 jmcneill * on programmed interval. Since there is a callout that is
2187 1.1 jmcneill * invoked for every hz in driver we use that instead of
2188 1.1 jmcneill * relying on periodic SMB interrupt.
2189 1.1 jmcneill */
2190 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2191 1.1 jmcneill /* Clear MAC statistics. */
2192 1.1 jmcneill alc_stats_clear(sc);
2193 1.1 jmcneill
2194 1.1 jmcneill /*
2195 1.1 jmcneill * Always use maximum frame size that controller can support.
2196 1.1 jmcneill * Otherwise received frames that has larger frame length
2197 1.1 jmcneill * than alc(4) MTU would be silently dropped in hardware. This
2198 1.1 jmcneill * would make path-MTU discovery hard as sender wouldn't get
2199 1.1 jmcneill * any responses from receiver. alc(4) supports
2200 1.1 jmcneill * multi-fragmented frames on Rx path so it has no issue on
2201 1.1 jmcneill * assembling fragmented frames. Using maximum frame size also
2202 1.1 jmcneill * removes the need to reinitialize hardware when interface
2203 1.1 jmcneill * MTU configuration was changed.
2204 1.1 jmcneill *
2205 1.1 jmcneill * Be conservative in what you do, be liberal in what you
2206 1.1 jmcneill * accept from others - RFC 793.
2207 1.1 jmcneill */
2208 1.2 jmcneill CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2209 1.1 jmcneill
2210 1.1 jmcneill /* Disable header split(?) */
2211 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2212 1.1 jmcneill
2213 1.1 jmcneill /* Configure IPG/IFG parameters. */
2214 1.1 jmcneill CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2215 1.1 jmcneill ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
2216 1.1 jmcneill ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2217 1.1 jmcneill ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2218 1.1 jmcneill ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
2219 1.1 jmcneill /* Set parameters for half-duplex media. */
2220 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDPX_CFG,
2221 1.1 jmcneill ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2222 1.1 jmcneill HDPX_CFG_LCOL_MASK) |
2223 1.1 jmcneill ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2224 1.1 jmcneill HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2225 1.1 jmcneill ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2226 1.1 jmcneill HDPX_CFG_ABEBT_MASK) |
2227 1.1 jmcneill ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2228 1.1 jmcneill HDPX_CFG_JAMIPG_MASK));
2229 1.1 jmcneill /*
2230 1.1 jmcneill * Set TSO/checksum offload threshold. For frames that is
2231 1.1 jmcneill * larger than this threshold, hardware wouldn't do
2232 1.1 jmcneill * TSO/checksum offloading.
2233 1.1 jmcneill */
2234 1.1 jmcneill CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
2235 1.2 jmcneill (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2236 1.1 jmcneill TSO_OFFLOAD_THRESH_MASK);
2237 1.1 jmcneill /* Configure TxQ. */
2238 1.1 jmcneill reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2239 1.1 jmcneill TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
2240 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2241 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2242 1.2 jmcneill reg >>= 1;
2243 1.1 jmcneill reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2244 1.1 jmcneill TXQ_CFG_TD_BURST_MASK;
2245 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2246 1.1 jmcneill
2247 1.1 jmcneill /* Configure Rx free descriptor pre-fetching. */
2248 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2249 1.1 jmcneill ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
2250 1.1 jmcneill RX_RD_FREE_THRESH_HI_MASK) |
2251 1.1 jmcneill ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
2252 1.1 jmcneill RX_RD_FREE_THRESH_LO_MASK));
2253 1.1 jmcneill
2254 1.1 jmcneill /*
2255 1.1 jmcneill * Configure flow control parameters.
2256 1.1 jmcneill * XON : 80% of Rx FIFO
2257 1.1 jmcneill * XOFF : 30% of Rx FIFO
2258 1.1 jmcneill */
2259 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
2260 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) {
2261 1.2 jmcneill reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2262 1.2 jmcneill rxf_hi = (reg * 8) / 10;
2263 1.2 jmcneill rxf_lo = (reg * 3) / 10;
2264 1.2 jmcneill CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2265 1.2 jmcneill ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2266 1.2 jmcneill RX_FIFO_PAUSE_THRESH_LO_MASK) |
2267 1.2 jmcneill ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2268 1.2 jmcneill RX_FIFO_PAUSE_THRESH_HI_MASK));
2269 1.2 jmcneill }
2270 1.2 jmcneill
2271 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2272 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2273 1.2 jmcneill CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2274 1.2 jmcneill CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2275 1.2 jmcneill SERDES_PHY_CLK_SLOWDOWN);
2276 1.1 jmcneill
2277 1.1 jmcneill /* Disable RSS until I understand L1C/L2C's RSS logic. */
2278 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2279 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2280 1.1 jmcneill
2281 1.1 jmcneill /* Configure RxQ. */
2282 1.1 jmcneill reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2283 1.1 jmcneill RXQ_CFG_RD_BURST_MASK;
2284 1.1 jmcneill reg |= RXQ_CFG_RSS_MODE_DIS;
2285 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
2286 1.2 jmcneill reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
2287 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2288 1.1 jmcneill
2289 1.1 jmcneill /* Configure DMA parameters. */
2290 1.1 jmcneill reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2291 1.1 jmcneill reg |= sc->alc_rcb;
2292 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2293 1.1 jmcneill reg |= DMA_CFG_CMB_ENB;
2294 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2295 1.1 jmcneill reg |= DMA_CFG_SMB_ENB;
2296 1.1 jmcneill else
2297 1.1 jmcneill reg |= DMA_CFG_SMB_DIS;
2298 1.1 jmcneill reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2299 1.1 jmcneill DMA_CFG_RD_BURST_SHIFT;
2300 1.1 jmcneill reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2301 1.1 jmcneill DMA_CFG_WR_BURST_SHIFT;
2302 1.1 jmcneill reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2303 1.1 jmcneill DMA_CFG_RD_DELAY_CNT_MASK;
2304 1.1 jmcneill reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2305 1.1 jmcneill DMA_CFG_WR_DELAY_CNT_MASK;
2306 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2307 1.1 jmcneill
2308 1.1 jmcneill /*
2309 1.1 jmcneill * Configure Tx/Rx MACs.
2310 1.1 jmcneill * - Auto-padding for short frames.
2311 1.1 jmcneill * - Enable CRC generation.
2312 1.1 jmcneill * Actual reconfiguration of MAC for resolved speed/duplex
2313 1.1 jmcneill * is followed after detection of link establishment.
2314 1.2 jmcneill * AR813x/AR815x always does checksum computation regardless
2315 1.1 jmcneill * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to
2316 1.1 jmcneill * have bug in protocol field in Rx return structure so
2317 1.1 jmcneill * these controllers can't handle fragmented frames. Disable
2318 1.1 jmcneill * Rx checksum offloading until there is a newer controller
2319 1.1 jmcneill * that has sane implementation.
2320 1.1 jmcneill */
2321 1.1 jmcneill reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
2322 1.1 jmcneill ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2323 1.1 jmcneill MAC_CFG_PREAMBLE_MASK);
2324 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
2325 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
2326 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2327 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2328 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
2329 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
2330 1.1 jmcneill else
2331 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
2332 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2333 1.1 jmcneill
2334 1.1 jmcneill /* Set up the receive filter. */
2335 1.1 jmcneill alc_iff(sc);
2336 1.1 jmcneill alc_rxvlan(sc);
2337 1.1 jmcneill
2338 1.1 jmcneill /* Acknowledge all pending interrupts and clear it. */
2339 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
2340 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2341 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
2342 1.1 jmcneill
2343 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
2344 1.1 jmcneill /* Switch to the current media. */
2345 1.1 jmcneill mii = &sc->sc_miibus;
2346 1.1 jmcneill mii_mediachg(mii);
2347 1.1 jmcneill
2348 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2349 1.1 jmcneill
2350 1.1 jmcneill ifp->if_flags |= IFF_RUNNING;
2351 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
2352 1.1 jmcneill
2353 1.1 jmcneill return (0);
2354 1.1 jmcneill }
2355 1.1 jmcneill
/*
 * Stop the interface: halt the DMA engines and Rx/Tx MACs, disable
 * interrupts and release every mbuf still owned by the driver.  The
 * teardown order below (callout, interrupts, queues, DMA, MAC) follows
 * the hardware bring-down sequence; don't reorder it casually.
 * "disable" is part of the ifnet if_stop contract and is unused here.
 */
static void
alc_stop(struct ifnet *ifp, int disable)
{
	struct alc_softc *sc = ifp->if_softc;
	struct alc_txdesc *txd;
	struct alc_rxdesc *rxd;
	uint32_t reg;
	int i;

	/* Stop the one-second tick callout before touching shared state. */
	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	sc->alc_flags &= ~ALC_FLAG_LINK;

	/* Fold the current hardware counters into the driver statistics. */
	alc_stats_update(sc);

	mii_down(&sc->sc_miibus);

	/* Disable interrupts. */
	CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
	alc_stop_queue(sc);

	/* Disable DMA (CMB/SMB transfers). */
	reg = CSR_READ_4(sc, ALC_DMA_CFG);
	reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
	reg |= DMA_CFG_SMB_DIS;
	CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
	DELAY(1000);

	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Disable interrupts which might be touched in taskq handler. */
	CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);

	/* Reclaim the partially assembled Rx frame, if any. */
	if (sc->alc_cdata.alc_rxhead != NULL)
		m_freem(sc->alc_cdata.alc_rxhead);
	ALC_RXCHAIN_RESET(sc);
	/*
	 * Free Tx/Rx mbufs still in the queues.
	 */
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < ALC_TX_RING_CNT; i++) {
		txd = &sc->alc_cdata.alc_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}
2421 1.1 jmcneill
/*
 * Disable the Rx/Tx MAC engines and busy-wait until the controller
 * reports fully idle, or until ALC_TIMEOUT polls (10us apart) elapse.
 */
static void
alc_stop_mac(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable Rx/Tx MAC. */
	reg = CSR_READ_4(sc, ALC_MAC_CFG);
	if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
		reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	/* Poll until every idle-status bit clears. */
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if (reg == 0)
			break;
		DELAY(10);
	}
	/* i == 0 means the poll loop above timed out. */
	if (i == 0)
		printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);
}
2444 1.1 jmcneill
/*
 * Enable the Rx and Tx DMA queues.  qcfg[] maps a count of Rx queues
 * to the corresponding RXQ_CFG enable bits; this driver only ever uses
 * a single Rx queue, hence qcfg[1] below (the other entries are kept
 * for reference).
 */
static void
alc_start_queue(struct alc_softc *sc)
{
	uint32_t qcfg[] = {
		0,
		RXQ_CFG_QUEUE0_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
		RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
		RXQ_CFG_ENB
	};
	uint32_t cfg;

	/* Enable RxQ. */
	cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
	cfg &= ~RXQ_CFG_ENB;
	cfg |= qcfg[1];
	CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
	/* Enable TxQ. */
	cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
	cfg |= TXQ_CFG_ENB;
	CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
}
2467 1.1 jmcneill
/*
 * Disable the Rx and Tx DMA queues, then busy-wait (ALC_TIMEOUT polls,
 * 10us apart) until the controller reports both queues idle.
 */
static void
alc_stop_queue(struct alc_softc *sc)
{
	uint32_t reg;
	int i;

	/* Disable RxQ. */
	reg = CSR_READ_4(sc, ALC_RXQ_CFG);
	if ((reg & RXQ_CFG_ENB) != 0) {
		reg &= ~RXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
	}
	/* Disable TxQ. */
	reg = CSR_READ_4(sc, ALC_TXQ_CFG);
	if ((reg & TXQ_CFG_ENB) != 0) {
		reg &= ~TXQ_CFG_ENB;
		CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
	}
	/* Wait for both queue engines to go idle. */
	for (i = ALC_TIMEOUT; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
		if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
			break;
		DELAY(10);
	}
	/* i == 0 means the poll loop above timed out. */
	if (i == 0)
		printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
		    device_xname(sc->sc_dev), reg);
}
2496 1.1 jmcneill
2497 1.1 jmcneill static void
2498 1.1 jmcneill alc_init_tx_ring(struct alc_softc *sc)
2499 1.1 jmcneill {
2500 1.1 jmcneill struct alc_ring_data *rd;
2501 1.1 jmcneill struct alc_txdesc *txd;
2502 1.1 jmcneill int i;
2503 1.1 jmcneill
2504 1.1 jmcneill sc->alc_cdata.alc_tx_prod = 0;
2505 1.1 jmcneill sc->alc_cdata.alc_tx_cons = 0;
2506 1.1 jmcneill sc->alc_cdata.alc_tx_cnt = 0;
2507 1.1 jmcneill
2508 1.1 jmcneill rd = &sc->alc_rdata;
2509 1.1 jmcneill memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
2510 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2511 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2512 1.1 jmcneill txd->tx_m = NULL;
2513 1.1 jmcneill }
2514 1.1 jmcneill
2515 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2516 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2517 1.1 jmcneill }
2518 1.1 jmcneill
/*
 * Initialize the Rx descriptor ring and attach a fresh mbuf to every
 * slot via alc_newbuf().  Returns 0 on success or ENOBUFS if an Rx
 * buffer could not be allocated.
 */
static int
alc_init_rx_ring(struct alc_softc *sc)
{
	struct alc_ring_data *rd;
	struct alc_rxdesc *rxd;
	int i;

	/* Start the consumer at the last slot (one behind slot 0). */
	sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
	rd = &sc->alc_rdata;
	memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
	for (i = 0; i < ALC_RX_RING_CNT; i++) {
		rxd = &sc->alc_cdata.alc_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_desc = &rd->alc_rx_ring[i];
		if (alc_newbuf(sc, rxd, 1) != 0)
			return (ENOBUFS);
	}

	/*
	 * Since the controller does not update Rx descriptors, the
	 * driver doesn't have to read them back, so BUS_DMASYNC_PREWRITE
	 * is enough to ensure coherence.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
	    sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	/* Let controller know availability of new Rx buffers. */
	CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);

	return (0);
}
2549 1.1 jmcneill
2550 1.1 jmcneill static void
2551 1.1 jmcneill alc_init_rr_ring(struct alc_softc *sc)
2552 1.1 jmcneill {
2553 1.1 jmcneill struct alc_ring_data *rd;
2554 1.1 jmcneill
2555 1.1 jmcneill sc->alc_cdata.alc_rr_cons = 0;
2556 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2557 1.1 jmcneill
2558 1.1 jmcneill rd = &sc->alc_rdata;
2559 1.1 jmcneill memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
2560 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2561 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2562 1.1 jmcneill }
2563 1.1 jmcneill
2564 1.1 jmcneill static void
2565 1.1 jmcneill alc_init_cmb(struct alc_softc *sc)
2566 1.1 jmcneill {
2567 1.1 jmcneill struct alc_ring_data *rd;
2568 1.1 jmcneill
2569 1.1 jmcneill rd = &sc->alc_rdata;
2570 1.1 jmcneill memset(rd->alc_cmb, 0, ALC_CMB_SZ);
2571 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2572 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2573 1.1 jmcneill }
2574 1.1 jmcneill
2575 1.1 jmcneill static void
2576 1.1 jmcneill alc_init_smb(struct alc_softc *sc)
2577 1.1 jmcneill {
2578 1.1 jmcneill struct alc_ring_data *rd;
2579 1.1 jmcneill
2580 1.1 jmcneill rd = &sc->alc_rdata;
2581 1.1 jmcneill memset(rd->alc_smb, 0, ALC_SMB_SZ);
2582 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2583 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2584 1.1 jmcneill }
2585 1.1 jmcneill
2586 1.1 jmcneill static void
2587 1.1 jmcneill alc_rxvlan(struct alc_softc *sc)
2588 1.1 jmcneill {
2589 1.1 jmcneill uint32_t reg;
2590 1.1 jmcneill
2591 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2592 1.3 sborrill if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2593 1.1 jmcneill reg |= MAC_CFG_VLAN_TAG_STRIP;
2594 1.1 jmcneill else
2595 1.1 jmcneill reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2596 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2597 1.1 jmcneill }
2598 1.1 jmcneill
2599 1.1 jmcneill static void
2600 1.1 jmcneill alc_iff(struct alc_softc *sc)
2601 1.1 jmcneill {
2602 1.1 jmcneill struct ethercom *ec = &sc->sc_ec;
2603 1.1 jmcneill struct ifnet *ifp = &ec->ec_if;
2604 1.1 jmcneill struct ether_multi *enm;
2605 1.1 jmcneill struct ether_multistep step;
2606 1.1 jmcneill uint32_t crc;
2607 1.1 jmcneill uint32_t mchash[2];
2608 1.1 jmcneill uint32_t rxcfg;
2609 1.1 jmcneill
2610 1.1 jmcneill rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2611 1.1 jmcneill rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2612 1.1 jmcneill ifp->if_flags &= ~IFF_ALLMULTI;
2613 1.1 jmcneill
2614 1.1 jmcneill /*
2615 1.1 jmcneill * Always accept broadcast frames.
2616 1.1 jmcneill */
2617 1.1 jmcneill rxcfg |= MAC_CFG_BCAST;
2618 1.1 jmcneill
2619 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2620 1.1 jmcneill ifp->if_flags |= IFF_ALLMULTI;
2621 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC)
2622 1.1 jmcneill rxcfg |= MAC_CFG_PROMISC;
2623 1.1 jmcneill else
2624 1.1 jmcneill rxcfg |= MAC_CFG_ALLMULTI;
2625 1.1 jmcneill mchash[0] = mchash[1] = 0xFFFFFFFF;
2626 1.1 jmcneill } else {
2627 1.1 jmcneill /* Program new filter. */
2628 1.1 jmcneill memset(mchash, 0, sizeof(mchash));
2629 1.1 jmcneill
2630 1.1 jmcneill ETHER_FIRST_MULTI(step, ec, enm);
2631 1.1 jmcneill while (enm != NULL) {
2632 1.1 jmcneill crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2633 1.1 jmcneill mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2634 1.1 jmcneill ETHER_NEXT_MULTI(step, enm);
2635 1.1 jmcneill }
2636 1.1 jmcneill }
2637 1.1 jmcneill
2638 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2639 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2640 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2641 1.1 jmcneill }
2642 1.1 jmcneill
/* Kernel module glue: declare if_alc as a driver module requiring "pci". */
MODULE(MODULE_CLASS_DRIVER, if_alc, "pci");

#ifdef _MODULE
/* Autogenerated configuration tables, only present in modular builds. */
#include "ioconf.c"
#endif
2648 1.1 jmcneill
2649 1.1 jmcneill static int
2650 1.1 jmcneill if_alc_modcmd(modcmd_t cmd, void *opaque)
2651 1.1 jmcneill {
2652 1.1 jmcneill int error = 0;
2653 1.1 jmcneill
2654 1.1 jmcneill switch (cmd) {
2655 1.1 jmcneill case MODULE_CMD_INIT:
2656 1.1 jmcneill #ifdef _MODULE
2657 1.1 jmcneill error = config_init_component(cfdriver_ioconf_if_alc,
2658 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2659 1.1 jmcneill #endif
2660 1.1 jmcneill return error;
2661 1.1 jmcneill case MODULE_CMD_FINI:
2662 1.1 jmcneill #ifdef _MODULE
2663 1.1 jmcneill error = config_fini_component(cfdriver_ioconf_if_alc,
2664 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2665 1.1 jmcneill #endif
2666 1.1 jmcneill return error;
2667 1.1 jmcneill default:
2668 1.1 jmcneill return ENOTTY;
2669 1.1 jmcneill }
2670 1.1 jmcneill }
2671