if_alc.c revision 1.6
  1 1.1 jmcneill /* $OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $ */
2 1.1 jmcneill /*-
3 1.1 jmcneill * Copyright (c) 2009, Pyun YongHyeon <yongari (at) FreeBSD.org>
4 1.1 jmcneill * All rights reserved.
5 1.1 jmcneill *
6 1.1 jmcneill * Redistribution and use in source and binary forms, with or without
7 1.1 jmcneill * modification, are permitted provided that the following conditions
8 1.1 jmcneill * are met:
9 1.1 jmcneill * 1. Redistributions of source code must retain the above copyright
10 1.1 jmcneill * notice unmodified, this list of conditions, and the following
11 1.1 jmcneill * disclaimer.
12 1.1 jmcneill * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 jmcneill * notice, this list of conditions and the following disclaimer in the
14 1.1 jmcneill * documentation and/or other materials provided with the distribution.
15 1.1 jmcneill *
16 1.1 jmcneill * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.1 jmcneill * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.1 jmcneill * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.1 jmcneill * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.1 jmcneill * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.1 jmcneill * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.1 jmcneill * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.1 jmcneill * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.1 jmcneill * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.1 jmcneill * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.1 jmcneill * SUCH DAMAGE.
27 1.1 jmcneill */
28 1.1 jmcneill
29 1.2 jmcneill /* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
30 1.1 jmcneill
31 1.1 jmcneill #ifdef _KERNEL_OPT
32 1.1 jmcneill #include "vlan.h"
33 1.1 jmcneill #endif
34 1.1 jmcneill
35 1.1 jmcneill #include <sys/param.h>
36 1.1 jmcneill #include <sys/proc.h>
37 1.1 jmcneill #include <sys/endian.h>
38 1.1 jmcneill #include <sys/systm.h>
39 1.1 jmcneill #include <sys/types.h>
40 1.1 jmcneill #include <sys/sockio.h>
41 1.1 jmcneill #include <sys/mbuf.h>
42 1.1 jmcneill #include <sys/queue.h>
43 1.1 jmcneill #include <sys/kernel.h>
44 1.1 jmcneill #include <sys/device.h>
45 1.1 jmcneill #include <sys/callout.h>
46 1.1 jmcneill #include <sys/socket.h>
47 1.1 jmcneill #include <sys/module.h>
48 1.1 jmcneill
49 1.1 jmcneill #include <sys/bus.h>
50 1.1 jmcneill
51 1.1 jmcneill #include <net/if.h>
52 1.1 jmcneill #include <net/if_dl.h>
53 1.1 jmcneill #include <net/if_llc.h>
54 1.1 jmcneill #include <net/if_media.h>
55 1.1 jmcneill #include <net/if_ether.h>
56 1.1 jmcneill
57 1.1 jmcneill #include <net/bpf.h>
58 1.1 jmcneill
59 1.1 jmcneill #ifdef INET
60 1.1 jmcneill #include <netinet/in.h>
61 1.1 jmcneill #include <netinet/in_systm.h>
62 1.1 jmcneill #include <netinet/in_var.h>
63 1.1 jmcneill #include <netinet/ip.h>
64 1.1 jmcneill #endif
65 1.1 jmcneill
66 1.1 jmcneill #include <net/if_types.h>
67 1.1 jmcneill #include <net/if_vlanvar.h>
68 1.1 jmcneill
71 1.1 jmcneill #include <sys/rnd.h>
72 1.1 jmcneill
73 1.1 jmcneill #include <dev/mii/mii.h>
74 1.1 jmcneill #include <dev/mii/miivar.h>
75 1.1 jmcneill
76 1.1 jmcneill #include <dev/pci/pcireg.h>
77 1.1 jmcneill #include <dev/pci/pcivar.h>
78 1.1 jmcneill #include <dev/pci/pcidevs.h>
79 1.1 jmcneill
80 1.1 jmcneill #include <dev/pci/if_alcreg.h>
81 1.1 jmcneill
82 1.2 jmcneill /*
83 1.2 jmcneill * Devices supported by this driver.
84 1.2 jmcneill */
85 1.2 jmcneill static struct alc_ident alc_ident_table[] = {
86 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
87 1.2 jmcneill "Atheros AR8131 PCIe Gigabit Ethernet" },
88 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
89 1.2 jmcneill "Atheros AR8132 PCIe Fast Ethernet" },
90 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
91 1.2 jmcneill "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
92 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
93 1.2 jmcneill "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
94 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
95 1.2 jmcneill "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
96 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
97 1.2 jmcneill "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
98 1.2 jmcneill { 0, 0, 0, NULL },
99 1.2 jmcneill };
100 1.2 jmcneill
101 1.1 jmcneill static int alc_match(device_t, cfdata_t, void *);
102 1.1 jmcneill static void alc_attach(device_t, device_t, void *);
103 1.1 jmcneill static int alc_detach(device_t, int);
104 1.1 jmcneill
105 1.1 jmcneill static int alc_init(struct ifnet *);
106 1.1 jmcneill static void alc_start(struct ifnet *);
107 1.1 jmcneill static int alc_ioctl(struct ifnet *, u_long, void *);
108 1.1 jmcneill static void alc_watchdog(struct ifnet *);
109 1.1 jmcneill static int alc_mediachange(struct ifnet *);
110 1.1 jmcneill static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
111 1.1 jmcneill
112 1.2 jmcneill static void alc_aspm(struct alc_softc *, int);
113 1.1 jmcneill static void alc_disable_l0s_l1(struct alc_softc *);
114 1.1 jmcneill static int alc_dma_alloc(struct alc_softc *);
115 1.1 jmcneill static void alc_dma_free(struct alc_softc *);
116 1.1 jmcneill static int alc_encap(struct alc_softc *, struct mbuf **);
117 1.2 jmcneill static struct alc_ident *
118 1.2 jmcneill alc_find_ident(struct pci_attach_args *);
119 1.1 jmcneill static void alc_get_macaddr(struct alc_softc *);
120 1.1 jmcneill static void alc_init_cmb(struct alc_softc *);
121 1.1 jmcneill static void alc_init_rr_ring(struct alc_softc *);
122 1.1 jmcneill static int alc_init_rx_ring(struct alc_softc *);
123 1.1 jmcneill static void alc_init_smb(struct alc_softc *);
124 1.1 jmcneill static void alc_init_tx_ring(struct alc_softc *);
125 1.1 jmcneill static int alc_intr(void *);
126 1.1 jmcneill static void alc_mac_config(struct alc_softc *);
127 1.1 jmcneill static int alc_miibus_readreg(device_t, int, int);
128 1.6 matt static void alc_miibus_statchg(struct ifnet *);
129 1.1 jmcneill static void alc_miibus_writereg(device_t, int, int, int);
130 1.1 jmcneill static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *, int);
131 1.1 jmcneill static void alc_phy_down(struct alc_softc *);
132 1.1 jmcneill static void alc_phy_reset(struct alc_softc *);
133 1.1 jmcneill static void alc_reset(struct alc_softc *);
134 1.1 jmcneill static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
135 1.1 jmcneill static int alc_rxintr(struct alc_softc *);
136 1.1 jmcneill static void alc_iff(struct alc_softc *);
137 1.1 jmcneill static void alc_rxvlan(struct alc_softc *);
138 1.1 jmcneill static void alc_start_queue(struct alc_softc *);
139 1.1 jmcneill static void alc_stats_clear(struct alc_softc *);
140 1.1 jmcneill static void alc_stats_update(struct alc_softc *);
141 1.1 jmcneill static void alc_stop(struct ifnet *, int);
142 1.1 jmcneill static void alc_stop_mac(struct alc_softc *);
143 1.1 jmcneill static void alc_stop_queue(struct alc_softc *);
144 1.1 jmcneill static void alc_tick(void *);
145 1.1 jmcneill static void alc_txeof(struct alc_softc *);
146 1.1 jmcneill
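/*
 * DMA burst lengths in bytes, indexed by the 3-bit maximum read request
 * size / maximum payload size encodings taken from the PCIe Device
 * Control register (see the PCIe capability probing in alc_attach()).
 */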
147 1.1 jmcneill uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
148 1.1 jmcneill
149 1.1 jmcneill CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
150 1.1 jmcneill alc_match, alc_attach, alc_detach, NULL);
151 1.1 jmcneill
152 1.1 jmcneill int alcdebug = 0;
153 1.1 jmcneill #define DPRINTF(x) do { if (alcdebug) printf x; } while (0)
154 1.1 jmcneill
155 1.1 jmcneill #define ETHER_ALIGN 2
156 1.1 jmcneill #define ALC_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)
157 1.1 jmcneill
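/*
 * MII bus helpers: start an MDIO read/write through the ALC_MDIO
 * register and poll until the EXECUTE/BUSY bits clear.
 */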
158 1.1 jmcneill static int
159 1.1 jmcneill alc_miibus_readreg(device_t dev, int phy, int reg)
160 1.1 jmcneill {
161 1.1 jmcneill struct alc_softc *sc = device_private(dev);
162 1.1 jmcneill uint32_t v;
163 1.1 jmcneill int i;
164 1.1 jmcneill
165 1.1 jmcneill if (phy != sc->alc_phyaddr)
166 1.1 jmcneill return (0);
167 1.1 jmcneill
168 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
169 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
170 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
171 1.1 jmcneill DELAY(5);
172 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
173 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
174 1.1 jmcneill break;
175 1.1 jmcneill }
176 1.1 jmcneill
177 1.1 jmcneill if (i == 0) {
178 1.1 jmcneill printf("%s: phy read timeout: phy %d, reg %d\n",
179 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
180 1.1 jmcneill return (0);
181 1.1 jmcneill }
182 1.1 jmcneill
183 1.1 jmcneill return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
184 1.1 jmcneill }
185 1.1 jmcneill
186 1.1 jmcneill static void
187 1.1 jmcneill alc_miibus_writereg(device_t dev, int phy, int reg, int val)
188 1.1 jmcneill {
189 1.1 jmcneill struct alc_softc *sc = device_private(dev);
190 1.1 jmcneill uint32_t v;
191 1.1 jmcneill int i;
192 1.1 jmcneill
193 1.1 jmcneill if (phy != sc->alc_phyaddr)
194 1.1 jmcneill return;
195 1.1 jmcneill
196 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
197 1.1 jmcneill (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
198 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
199 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
200 1.1 jmcneill DELAY(5);
201 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
202 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
203 1.1 jmcneill break;
204 1.1 jmcneill }
205 1.1 jmcneill
206 1.1 jmcneill if (i == 0)
207 1.1 jmcneill printf("%s: phy write timeout: phy %d, reg %d\n",
208 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
209 1.1 jmcneill }
210 1.1 jmcneill
211 1.1 jmcneill static void
212 1.6 matt alc_miibus_statchg(struct ifnet *ifp)
213 1.1 jmcneill {
214 1.6 matt struct alc_softc *sc = ifp->if_softc;
215 1.6 matt struct mii_data *mii = &sc->sc_miibus;
216 1.1 jmcneill uint32_t reg;
217 1.1 jmcneill
218 1.1 jmcneill if ((ifp->if_flags & IFF_RUNNING) == 0)
219 1.1 jmcneill return;
220 1.1 jmcneill
221 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
222 1.1 jmcneill if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
223 1.1 jmcneill (IFM_ACTIVE | IFM_AVALID)) {
224 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
225 1.1 jmcneill case IFM_10_T:
226 1.1 jmcneill case IFM_100_TX:
227 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
228 1.1 jmcneill break;
229 1.1 jmcneill case IFM_1000_T:
230 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
231 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
232 1.1 jmcneill break;
233 1.1 jmcneill default:
234 1.1 jmcneill break;
235 1.1 jmcneill }
236 1.1 jmcneill }
237 1.1 jmcneill alc_stop_queue(sc);
238 1.1 jmcneill /* Stop Rx/Tx MACs. */
239 1.1 jmcneill alc_stop_mac(sc);
240 1.1 jmcneill
241 1.1 jmcneill /* Program MACs with resolved speed/duplex/flow-control. */
242 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
243 1.1 jmcneill alc_start_queue(sc);
244 1.1 jmcneill alc_mac_config(sc);
245 1.1 jmcneill /* Re-enable Tx/Rx MACs. */
246 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
247 1.1 jmcneill reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
248 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
249 1.2 jmcneill alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
250 1.1 jmcneill }
251 1.1 jmcneill }
252 1.1 jmcneill
253 1.1 jmcneill static void
254 1.1 jmcneill alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
255 1.1 jmcneill {
256 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
257 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
258 1.1 jmcneill
259 1.1 jmcneill mii_pollstat(mii);
260 1.1 jmcneill ifmr->ifm_status = mii->mii_media_status;
261 1.1 jmcneill ifmr->ifm_active = mii->mii_media_active;
262 1.1 jmcneill }
263 1.1 jmcneill
264 1.1 jmcneill static int
265 1.1 jmcneill alc_mediachange(struct ifnet *ifp)
266 1.1 jmcneill {
267 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
268 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
269 1.1 jmcneill int error;
270 1.1 jmcneill
271 1.1 jmcneill if (mii->mii_instance != 0) {
272 1.1 jmcneill struct mii_softc *miisc;
273 1.1 jmcneill
274 1.1 jmcneill LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
275 1.1 jmcneill mii_phy_reset(miisc);
276 1.1 jmcneill }
277 1.1 jmcneill error = mii_mediachg(mii);
278 1.1 jmcneill
279 1.1 jmcneill return (error);
280 1.1 jmcneill }
281 1.1 jmcneill
282 1.2 jmcneill static struct alc_ident *
283 1.2 jmcneill alc_find_ident(struct pci_attach_args *pa)
284 1.2 jmcneill {
285 1.2 jmcneill struct alc_ident *ident;
286 1.2 jmcneill uint16_t vendor, devid;
287 1.2 jmcneill
288 1.2 jmcneill vendor = PCI_VENDOR(pa->pa_id);
289 1.2 jmcneill devid = PCI_PRODUCT(pa->pa_id);
290 1.2 jmcneill for (ident = alc_ident_table; ident->name != NULL; ident++) {
291 1.2 jmcneill if (vendor == ident->vendorid && devid == ident->deviceid)
292 1.2 jmcneill return (ident);
293 1.2 jmcneill }
294 1.2 jmcneill
295 1.2 jmcneill return (NULL);
296 1.2 jmcneill }
297 1.2 jmcneill
298 1.1 jmcneill static int
299 1.1 jmcneill alc_match(device_t dev, cfdata_t match, void *aux)
300 1.1 jmcneill {
301 1.1 jmcneill struct pci_attach_args *pa = aux;
302 1.1 jmcneill
303 1.2 jmcneill return alc_find_ident(pa) != NULL;
304 1.1 jmcneill }
305 1.1 jmcneill
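/*
 * Read the station address from the controller, first letting the TWSI
 * block reload it from an external EEPROM/OTP when one is detected.
 */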
306 1.1 jmcneill static void
307 1.1 jmcneill alc_get_macaddr(struct alc_softc *sc)
308 1.1 jmcneill {
309 1.1 jmcneill uint32_t ea[2], opt;
310 1.2 jmcneill uint16_t val;
311 1.2 jmcneill int eeprom, i;
312 1.1 jmcneill
313 1.2 jmcneill eeprom = 0;
314 1.1 jmcneill opt = CSR_READ_4(sc, ALC_OPT_CFG);
315 1.2 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
316 1.2 jmcneill (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
317 1.1 jmcneill /*
318 1.1 jmcneill * EEPROM found, let TWSI reload EEPROM configuration.
 319 1.1 jmcneill 		 * This will set the Ethernet address of the controller.
320 1.1 jmcneill */
321 1.2 jmcneill eeprom++;
322 1.2 jmcneill switch (sc->alc_ident->deviceid) {
323 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8131:
324 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
325 1.2 jmcneill if ((opt & OPT_CFG_CLK_ENB) == 0) {
326 1.2 jmcneill opt |= OPT_CFG_CLK_ENB;
327 1.2 jmcneill CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
328 1.2 jmcneill CSR_READ_4(sc, ALC_OPT_CFG);
329 1.2 jmcneill DELAY(1000);
330 1.2 jmcneill }
331 1.2 jmcneill break;
332 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
333 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
334 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
335 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
336 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
337 1.2 jmcneill ALC_MII_DBG_ADDR, 0x00);
338 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
339 1.2 jmcneill ALC_MII_DBG_DATA);
340 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
341 1.2 jmcneill ALC_MII_DBG_DATA, val & 0xFF7F);
342 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
343 1.2 jmcneill ALC_MII_DBG_ADDR, 0x3B);
344 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
345 1.2 jmcneill ALC_MII_DBG_DATA);
346 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
347 1.2 jmcneill ALC_MII_DBG_DATA, val | 0x0008);
348 1.2 jmcneill DELAY(20);
349 1.2 jmcneill break;
350 1.1 jmcneill }
351 1.2 jmcneill
352 1.2 jmcneill CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
353 1.2 jmcneill CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
354 1.2 jmcneill CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
355 1.2 jmcneill CSR_READ_4(sc, ALC_WOL_CFG);
356 1.2 jmcneill
357 1.1 jmcneill CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
358 1.1 jmcneill TWSI_CFG_SW_LD_START);
359 1.1 jmcneill for (i = 100; i > 0; i--) {
360 1.1 jmcneill DELAY(1000);
361 1.1 jmcneill if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
362 1.1 jmcneill TWSI_CFG_SW_LD_START) == 0)
363 1.1 jmcneill break;
364 1.1 jmcneill }
365 1.1 jmcneill if (i == 0)
366 1.1 jmcneill printf("%s: reloading EEPROM timeout!\n",
367 1.1 jmcneill device_xname(sc->sc_dev));
368 1.1 jmcneill } else {
369 1.1 jmcneill if (alcdebug)
370 1.1 jmcneill printf("%s: EEPROM not found!\n", device_xname(sc->sc_dev));
371 1.1 jmcneill }
372 1.2 jmcneill if (eeprom != 0) {
373 1.2 jmcneill switch (sc->alc_ident->deviceid) {
374 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8131:
375 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
376 1.2 jmcneill if ((opt & OPT_CFG_CLK_ENB) != 0) {
377 1.2 jmcneill opt &= ~OPT_CFG_CLK_ENB;
378 1.2 jmcneill CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
379 1.2 jmcneill CSR_READ_4(sc, ALC_OPT_CFG);
380 1.2 jmcneill DELAY(1000);
381 1.2 jmcneill }
382 1.2 jmcneill break;
383 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
384 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
385 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
386 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
387 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
388 1.2 jmcneill ALC_MII_DBG_ADDR, 0x00);
389 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
390 1.2 jmcneill ALC_MII_DBG_DATA);
391 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
392 1.2 jmcneill ALC_MII_DBG_DATA, val | 0x0080);
393 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
394 1.2 jmcneill ALC_MII_DBG_ADDR, 0x3B);
395 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
396 1.2 jmcneill ALC_MII_DBG_DATA);
397 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
398 1.2 jmcneill ALC_MII_DBG_DATA, val & 0xFFF7);
399 1.2 jmcneill DELAY(20);
400 1.2 jmcneill break;
401 1.2 jmcneill }
402 1.1 jmcneill }
403 1.1 jmcneill
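	/*
	 * The station address is spread across PAR0/PAR1: PAR1 holds the
	 * two most significant bytes, PAR0 the remaining four.
	 */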
404 1.1 jmcneill ea[0] = CSR_READ_4(sc, ALC_PAR0);
405 1.1 jmcneill ea[1] = CSR_READ_4(sc, ALC_PAR1);
406 1.1 jmcneill sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
407 1.1 jmcneill sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
408 1.1 jmcneill sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
409 1.1 jmcneill sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
410 1.1 jmcneill sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
411 1.1 jmcneill sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
412 1.1 jmcneill }
413 1.1 jmcneill
414 1.1 jmcneill static void
415 1.1 jmcneill alc_disable_l0s_l1(struct alc_softc *sc)
416 1.1 jmcneill {
417 1.1 jmcneill uint32_t pmcfg;
418 1.1 jmcneill
 419 1.1 jmcneill 	/* Another bit of vendor magic. */
420 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
421 1.1 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
422 1.1 jmcneill PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
423 1.1 jmcneill PM_CFG_SERDES_PD_EX_L1);
424 1.1 jmcneill pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
425 1.1 jmcneill PM_CFG_SERDES_L1_ENB;
426 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
427 1.1 jmcneill }
428 1.1 jmcneill
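/*
 * Reset the PHY and load the vendor-recommended analog/DSP settings.
 */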
429 1.1 jmcneill static void
430 1.1 jmcneill alc_phy_reset(struct alc_softc *sc)
431 1.1 jmcneill {
432 1.1 jmcneill uint16_t data;
433 1.1 jmcneill
434 1.1 jmcneill /* Reset magic from Linux. */
435 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
436 1.1 jmcneill GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
437 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
438 1.1 jmcneill DELAY(10 * 1000);
439 1.1 jmcneill
440 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
441 1.1 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
442 1.1 jmcneill GPHY_CFG_SEL_ANA_RESET);
443 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
444 1.1 jmcneill DELAY(10 * 1000);
445 1.1 jmcneill
 446 1.2 jmcneill 	/* DSP fixup, vendor magic. */
447 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
448 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
449 1.2 jmcneill ALC_MII_DBG_ADDR, 0x000A);
450 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
451 1.2 jmcneill ALC_MII_DBG_DATA);
452 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
453 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xDFFF);
454 1.2 jmcneill }
455 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
456 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
457 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
458 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
459 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
460 1.2 jmcneill ALC_MII_DBG_ADDR, 0x003B);
461 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
462 1.2 jmcneill ALC_MII_DBG_DATA);
463 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
464 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xFFF7);
465 1.2 jmcneill DELAY(20 * 1000);
466 1.2 jmcneill }
467 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
468 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
469 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
470 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
471 1.2 jmcneill ALC_MII_DBG_DATA, 0x929D);
472 1.2 jmcneill }
473 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
474 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
475 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
476 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
477 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
478 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
479 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
480 1.2 jmcneill ALC_MII_DBG_DATA, 0xB6DD);
481 1.2 jmcneill }
482 1.2 jmcneill
483 1.1 jmcneill /* Load DSP codes, vendor magic. */
484 1.1 jmcneill data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
485 1.1 jmcneill ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
486 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
487 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG18);
488 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
489 1.1 jmcneill ALC_MII_DBG_DATA, data);
490 1.1 jmcneill
491 1.1 jmcneill data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
492 1.1 jmcneill ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
493 1.1 jmcneill ANA_SERDES_EN_LCKDT;
494 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
495 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG5);
496 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
497 1.1 jmcneill ALC_MII_DBG_DATA, data);
498 1.1 jmcneill
499 1.1 jmcneill data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
500 1.1 jmcneill ANA_LONG_CABLE_TH_100_MASK) |
501 1.1 jmcneill ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
 502 1.1 jmcneill 	    ANA_SHORT_CABLE_TH_100_MASK) |
503 1.1 jmcneill ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
504 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
505 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG54);
506 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
507 1.1 jmcneill ALC_MII_DBG_DATA, data);
508 1.1 jmcneill
509 1.1 jmcneill data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
510 1.1 jmcneill ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
511 1.1 jmcneill ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
512 1.1 jmcneill ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
513 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
514 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG4);
515 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
516 1.1 jmcneill ALC_MII_DBG_DATA, data);
517 1.1 jmcneill
 518 1.1 jmcneill 	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
519 1.1 jmcneill ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
520 1.1 jmcneill ANA_OEN_125M;
521 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
522 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG0);
523 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
524 1.1 jmcneill ALC_MII_DBG_DATA, data);
525 1.1 jmcneill DELAY(1000);
526 1.1 jmcneill }
527 1.1 jmcneill
528 1.1 jmcneill static void
529 1.1 jmcneill alc_phy_down(struct alc_softc *sc)
530 1.1 jmcneill {
531 1.2 jmcneill switch (sc->alc_ident->deviceid) {
532 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
533 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
534 1.2 jmcneill /*
535 1.2 jmcneill * GPHY power down caused more problems on AR8151 v2.0.
 536 1.2 jmcneill 		 * When the driver is reloaded after a GPHY power down,
 537 1.2 jmcneill 		 * accesses to PHY/MAC registers hung the system and only
 538 1.2 jmcneill 		 * a cold boot recovered from it. I'm not sure whether
 539 1.2 jmcneill 		 * AR8151 v1.0 also requires this workaround, as I don't
 540 1.2 jmcneill 		 * have an AR8151 v1.0 controller in hand.
 541 1.2 jmcneill 		 * The only option left is to isolate the PHY and
 542 1.2 jmcneill 		 * initiate a PHY power down, which in turn saves more
 543 1.2 jmcneill 		 * power when the driver is unloaded.
544 1.2 jmcneill */
545 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
546 1.2 jmcneill MII_BMCR, BMCR_ISO | BMCR_PDOWN);
547 1.2 jmcneill break;
548 1.2 jmcneill default:
549 1.2 jmcneill /* Force PHY down. */
550 1.2 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
551 1.2 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
552 1.2 jmcneill GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
553 1.2 jmcneill GPHY_CFG_PWDOWN_HW);
554 1.2 jmcneill DELAY(1000);
555 1.2 jmcneill break;
556 1.2 jmcneill }
557 1.1 jmcneill }
558 1.1 jmcneill
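/*
 * Program PCIe ASPM (L0s/L1) and SerDes power-saving behaviour based on
 * the chip's capabilities and the currently resolved link speed.
 */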
559 1.1 jmcneill static void
560 1.2 jmcneill alc_aspm(struct alc_softc *sc, int media)
561 1.1 jmcneill {
562 1.1 jmcneill uint32_t pmcfg;
563 1.2 jmcneill uint16_t linkcfg;
564 1.2 jmcneill
565 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
566 1.2 jmcneill if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
567 1.2 jmcneill (ALC_FLAG_APS | ALC_FLAG_PCIE))
568 1.2 jmcneill linkcfg = CSR_READ_2(sc, sc->alc_expcap +
569 1.2 jmcneill PCI_PCIE_LCSR);
570 1.2 jmcneill else
571 1.2 jmcneill linkcfg = 0;
572 1.1 jmcneill pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
573 1.2 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
574 1.1 jmcneill pmcfg |= PM_CFG_MAC_ASPM_CHK;
575 1.2 jmcneill pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
576 1.2 jmcneill pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
577 1.2 jmcneill
578 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
579 1.2 jmcneill /* Disable extended sync except AR8152 B v1.0 */
580 1.2 jmcneill linkcfg &= ~0x80;
581 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
582 1.2 jmcneill sc->alc_rev == ATHEROS_AR8152_B_V10)
583 1.2 jmcneill linkcfg |= 0x80;
584 1.2 jmcneill CSR_WRITE_2(sc, sc->alc_expcap + PCI_PCIE_LCSR,
585 1.2 jmcneill linkcfg);
586 1.2 jmcneill pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
587 1.2 jmcneill PM_CFG_HOTRST);
588 1.2 jmcneill pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
589 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
590 1.2 jmcneill pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
591 1.2 jmcneill pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
592 1.2 jmcneill PM_CFG_PM_REQ_TIMER_SHIFT);
593 1.2 jmcneill pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
594 1.2 jmcneill }
595 1.2 jmcneill
596 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
597 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
598 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L0S_ENB;
599 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
600 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L1_ENB;
601 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
602 1.2 jmcneill if (sc->alc_ident->deviceid ==
603 1.2 jmcneill PCI_PRODUCT_ATTANSIC_AR8152_B)
604 1.2 jmcneill pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
605 1.2 jmcneill pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
606 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB |
607 1.2 jmcneill PM_CFG_SERDES_BUDS_RX_L1_ENB);
608 1.2 jmcneill pmcfg |= PM_CFG_CLK_SWH_L1;
609 1.2 jmcneill if (media == IFM_100_TX || media == IFM_1000_T) {
610 1.2 jmcneill pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
611 1.2 jmcneill switch (sc->alc_ident->deviceid) {
612 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
613 1.2 jmcneill pmcfg |= (7 <<
614 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
615 1.2 jmcneill break;
616 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
617 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
618 1.2 jmcneill pmcfg |= (4 <<
619 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
620 1.2 jmcneill break;
621 1.2 jmcneill default:
622 1.2 jmcneill pmcfg |= (15 <<
623 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
624 1.2 jmcneill break;
625 1.2 jmcneill }
626 1.2 jmcneill }
627 1.2 jmcneill } else {
628 1.2 jmcneill pmcfg |= PM_CFG_SERDES_L1_ENB |
629 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB |
630 1.2 jmcneill PM_CFG_SERDES_BUDS_RX_L1_ENB;
631 1.2 jmcneill pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
632 1.2 jmcneill PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
633 1.2 jmcneill }
634 1.1 jmcneill } else {
635 1.2 jmcneill pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
636 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB);
637 1.1 jmcneill pmcfg |= PM_CFG_CLK_SWH_L1;
638 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
639 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L1_ENB;
640 1.1 jmcneill }
641 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
642 1.1 jmcneill }
643 1.1 jmcneill
644 1.1 jmcneill static void
645 1.1 jmcneill alc_attach(device_t parent, device_t self, void *aux)
646 1.1 jmcneill {
647 1.1 jmcneill
648 1.1 jmcneill struct alc_softc *sc = device_private(self);
649 1.1 jmcneill struct pci_attach_args *pa = aux;
650 1.1 jmcneill pci_chipset_tag_t pc = pa->pa_pc;
651 1.1 jmcneill pci_intr_handle_t ih;
652 1.1 jmcneill const char *intrstr;
653 1.1 jmcneill struct ifnet *ifp;
654 1.1 jmcneill pcireg_t memtype;
655 1.2 jmcneill const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
656 1.1 jmcneill uint16_t burst;
657 1.1 jmcneill int base, mii_flags, state, error = 0;
658 1.1 jmcneill uint32_t cap, ctl, val;
659 1.1 jmcneill
660 1.2 jmcneill sc->alc_ident = alc_find_ident(pa);
661 1.2 jmcneill
662 1.1 jmcneill aprint_naive("\n");
663 1.2 jmcneill aprint_normal(": %s\n", sc->alc_ident->name);
664 1.1 jmcneill
665 1.1 jmcneill sc->sc_dev = self;
666 1.1 jmcneill sc->sc_dmat = pa->pa_dmat;
667 1.1 jmcneill sc->sc_pct = pa->pa_pc;
668 1.1 jmcneill sc->sc_pcitag = pa->pa_tag;
669 1.1 jmcneill
670 1.1 jmcneill /*
671 1.1 jmcneill * Allocate IO memory
672 1.1 jmcneill */
673 1.1 jmcneill memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
674 1.1 jmcneill switch (memtype) {
675 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
676 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
677 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
678 1.1 jmcneill break;
679 1.1 jmcneill default:
680 1.1 jmcneill aprint_error_dev(self, "invalid base address register\n");
681 1.1 jmcneill break;
682 1.1 jmcneill }
683 1.1 jmcneill
684 1.1 jmcneill if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
685 1.1 jmcneill &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
686 1.1 jmcneill aprint_error_dev(self, "could not map mem space\n");
687 1.1 jmcneill return;
688 1.1 jmcneill }
689 1.1 jmcneill
690 1.1 jmcneill if (pci_intr_map(pa, &ih) != 0) {
691 1.1 jmcneill printf(": can't map interrupt\n");
692 1.1 jmcneill goto fail;
693 1.1 jmcneill }
694 1.1 jmcneill
695 1.1 jmcneill /*
696 1.1 jmcneill * Allocate IRQ
697 1.1 jmcneill */
698 1.1 jmcneill intrstr = pci_intr_string(sc->sc_pct, ih);
699 1.1 jmcneill sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc);
700 1.1 jmcneill if (sc->sc_irq_handle == NULL) {
701 1.1 jmcneill printf(": could not establish interrupt");
702 1.1 jmcneill if (intrstr != NULL)
703 1.1 jmcneill printf(" at %s", intrstr);
704 1.1 jmcneill printf("\n");
705 1.1 jmcneill goto fail;
706 1.1 jmcneill }
707 1.4 matt aprint_normal_dev(self, "interrupting at %s\n", intrstr);
708 1.1 jmcneill
709 1.1 jmcneill /* Set PHY address. */
710 1.1 jmcneill sc->alc_phyaddr = ALC_PHY_ADDR;
711 1.1 jmcneill
712 1.1 jmcneill /* Initialize DMA parameters. */
713 1.1 jmcneill sc->alc_dma_rd_burst = 0;
714 1.1 jmcneill sc->alc_dma_wr_burst = 0;
715 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_64;
716 1.1 jmcneill if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
717 1.1 jmcneill &base, NULL)) {
718 1.1 jmcneill sc->alc_flags |= ALC_FLAG_PCIE;
719 1.2 jmcneill sc->alc_expcap = base;
720 1.1 jmcneill burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
721 1.1 jmcneill base + PCI_PCIE_DCSR) >> 16;
722 1.1 jmcneill sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
723 1.1 jmcneill sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
724 1.1 jmcneill if (alcdebug) {
725 1.1 jmcneill printf("%s: Read request size : %u bytes.\n",
726 1.1 jmcneill device_xname(sc->sc_dev),
727 1.1 jmcneill alc_dma_burst[sc->alc_dma_rd_burst]);
728 1.1 jmcneill printf("%s: TLP payload size : %u bytes.\n",
729 1.1 jmcneill device_xname(sc->sc_dev),
730 1.1 jmcneill alc_dma_burst[sc->alc_dma_wr_burst]);
731 1.1 jmcneill }
732 1.1 jmcneill /* Clear data link and flow-control protocol error. */
733 1.1 jmcneill val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
734 1.1 jmcneill val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
735 1.1 jmcneill CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
736 1.2 jmcneill CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
737 1.2 jmcneill CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
738 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
739 1.2 jmcneill CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
740 1.2 jmcneill PCIE_PHYMISC_FORCE_RCV_DET);
741 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
742 1.2 jmcneill sc->alc_rev == ATHEROS_AR8152_B_V10) {
743 1.2 jmcneill val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
744 1.2 jmcneill val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
745 1.2 jmcneill PCIE_PHYMISC2_SERDES_TH_MASK);
746 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
747 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
748 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
749 1.2 jmcneill }
750 1.1 jmcneill /* Disable ASPM L0S and L1. */
751 1.1 jmcneill cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
752 1.1 jmcneill base + PCI_PCIE_LCAP) >> 16;
753 1.1 jmcneill if ((cap & 0x00000c00) != 0) {
754 1.1 jmcneill ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
755 1.1 jmcneill base + PCI_PCIE_LCSR) >> 16;
756 1.1 jmcneill if ((ctl & 0x08) != 0)
757 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_128;
758 1.1 jmcneill if (alcdebug)
759 1.1 jmcneill printf("%s: RCB %u bytes\n",
760 1.1 jmcneill device_xname(sc->sc_dev),
761 1.1 jmcneill sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
762 1.1 jmcneill state = ctl & 0x03;
763 1.2 jmcneill if (state & 0x01)
764 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L0S;
765 1.2 jmcneill if (state & 0x02)
766 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L1S;
767 1.1 jmcneill if (alcdebug)
768 1.1 jmcneill printf("%s: ASPM %s %s\n",
769 1.1 jmcneill device_xname(sc->sc_dev),
770 1.1 jmcneill aspm_state[state],
771 1.1 jmcneill state == 0 ? "disabled" : "enabled");
772 1.2 jmcneill alc_disable_l0s_l1(sc);
773 1.2 jmcneill } else {
774 1.2 jmcneill aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
775 1.1 jmcneill }
776 1.1 jmcneill }
777 1.1 jmcneill
778 1.1 jmcneill /* Reset PHY. */
779 1.1 jmcneill alc_phy_reset(sc);
780 1.1 jmcneill
781 1.1 jmcneill /* Reset the ethernet controller. */
782 1.1 jmcneill alc_reset(sc);
783 1.1 jmcneill
784 1.1 jmcneill /*
 785 1.1 jmcneill 	 * One odd thing is that AR8132 uses the same PHY hardware (F1
 786 1.1 jmcneill 	 * gigabit PHY) as AR8131, so atphy(4) on AR8132 reports that
 787 1.1 jmcneill 	 * the PHY supports 1000Mbps, but that's not true. The PHY
 788 1.1 jmcneill 	 * used in AR8132 can't establish a gigabit link even if it
 789 1.1 jmcneill 	 * shows the same PHY model/revision number as AR8131.
790 1.1 jmcneill */
791 1.2 jmcneill switch (sc->alc_ident->deviceid) {
792 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
793 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
794 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
795 1.2 jmcneill /* FALLTHROUGH */
796 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
797 1.2 jmcneill sc->alc_flags |= ALC_FLAG_FASTETHER;
798 1.1 jmcneill break;
799 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
800 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
801 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
802 1.2 jmcneill /* FALLTHROUGH */
803 1.1 jmcneill default:
804 1.1 jmcneill break;
805 1.1 jmcneill }
806 1.2 jmcneill sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
807 1.1 jmcneill
808 1.1 jmcneill /*
 809 1.2 jmcneill 	 * It seems that AR813x/AR815x has a silicon bug in the SMB. In
 810 1.1 jmcneill 	 * addition, Atheros said that enabling SMB wouldn't improve
 811 1.1 jmcneill 	 * performance. Still, having to read lots of registers to
 812 1.1 jmcneill 	 * extract MAC statistics isn't attractive either.
813 1.1 jmcneill */
814 1.1 jmcneill sc->alc_flags |= ALC_FLAG_SMB_BUG;
815 1.1 jmcneill /*
 816 1.1 jmcneill 	 * Don't use Tx CMB. It is known to have a silicon bug.
817 1.1 jmcneill */
818 1.1 jmcneill sc->alc_flags |= ALC_FLAG_CMB_BUG;
819 1.1 jmcneill sc->alc_rev = PCI_REVISION(pa->pa_class);
820 1.1 jmcneill sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
821 1.1 jmcneill MASTER_CHIP_REV_SHIFT;
822 1.1 jmcneill if (alcdebug) {
823 1.1 jmcneill printf("%s: PCI device revision : 0x%04x\n",
824 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_rev);
825 1.1 jmcneill printf("%s: Chip id/revision : 0x%04x\n",
826 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_chip_rev);
827 1.1 jmcneill printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
828 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
829 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
830 1.1 jmcneill }
831 1.1 jmcneill
832 1.1 jmcneill error = alc_dma_alloc(sc);
833 1.1 jmcneill if (error)
834 1.1 jmcneill goto fail;
835 1.1 jmcneill
836 1.1 jmcneill callout_init(&sc->sc_tick_ch, 0);
837 1.1 jmcneill callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);
838 1.1 jmcneill
839 1.1 jmcneill /* Load station address. */
840 1.1 jmcneill alc_get_macaddr(sc);
841 1.1 jmcneill
842 1.1 jmcneill aprint_normal_dev(self, "Ethernet address %s\n",
843 1.1 jmcneill ether_sprintf(sc->alc_eaddr));
844 1.1 jmcneill
845 1.1 jmcneill ifp = &sc->sc_ec.ec_if;
846 1.1 jmcneill ifp->if_softc = sc;
847 1.1 jmcneill ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
848 1.1 jmcneill ifp->if_init = alc_init;
849 1.1 jmcneill ifp->if_ioctl = alc_ioctl;
850 1.1 jmcneill ifp->if_start = alc_start;
851 1.1 jmcneill ifp->if_stop = alc_stop;
852 1.1 jmcneill ifp->if_watchdog = alc_watchdog;
853 1.1 jmcneill ifp->if_baudrate = IF_Gbps(1);
854 1.1 jmcneill IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
855 1.1 jmcneill IFQ_SET_READY(&ifp->if_snd);
856 1.1 jmcneill strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
857 1.1 jmcneill
858 1.1 jmcneill sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
859 1.1 jmcneill
860 1.1 jmcneill #ifdef ALC_CHECKSUM
861 1.1 jmcneill ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
862 1.1 jmcneill IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
 863 1.1 jmcneill 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
864 1.1 jmcneill #endif
865 1.1 jmcneill
866 1.1 jmcneill #if NVLAN > 0
867 1.1 jmcneill sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
868 1.1 jmcneill #endif
869 1.1 jmcneill
870 1.1 jmcneill /* Set up MII bus. */
871 1.1 jmcneill sc->sc_miibus.mii_ifp = ifp;
872 1.1 jmcneill sc->sc_miibus.mii_readreg = alc_miibus_readreg;
873 1.1 jmcneill sc->sc_miibus.mii_writereg = alc_miibus_writereg;
874 1.1 jmcneill sc->sc_miibus.mii_statchg = alc_miibus_statchg;
875 1.1 jmcneill
876 1.1 jmcneill sc->sc_ec.ec_mii = &sc->sc_miibus;
877 1.1 jmcneill ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
878 1.1 jmcneill alc_mediastatus);
879 1.1 jmcneill mii_flags = 0;
880 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
881 1.1 jmcneill mii_flags |= MIIF_DOPAUSE;
882 1.1 jmcneill mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
883 1.1 jmcneill MII_OFFSET_ANY, mii_flags);
884 1.1 jmcneill
885 1.1 jmcneill if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
886 1.1 jmcneill printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
887 1.1 jmcneill ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
888 1.1 jmcneill 0, NULL);
889 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
890 1.1 jmcneill } else
891 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
892 1.1 jmcneill
893 1.1 jmcneill if_attach(ifp);
894 1.1 jmcneill ether_ifattach(ifp, sc->alc_eaddr);
895 1.1 jmcneill
896 1.1 jmcneill if (!pmf_device_register(self, NULL, NULL))
897 1.1 jmcneill aprint_error_dev(self, "couldn't establish power handler\n");
898 1.1 jmcneill else
899 1.1 jmcneill pmf_class_network_register(self, ifp);
900 1.1 jmcneill
901 1.1 jmcneill return;
902 1.1 jmcneill fail:
903 1.1 jmcneill alc_dma_free(sc);
904 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
905 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
906 1.1 jmcneill sc->sc_irq_handle = NULL;
907 1.1 jmcneill }
908 1.1 jmcneill if (sc->sc_mem_size) {
909 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
910 1.1 jmcneill sc->sc_mem_size = 0;
911 1.1 jmcneill }
912 1.1 jmcneill }
913 1.1 jmcneill
914 1.1 jmcneill static int
915 1.1 jmcneill alc_detach(device_t self, int flags)
916 1.1 jmcneill {
917 1.1 jmcneill struct alc_softc *sc = device_private(self);
918 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
919 1.1 jmcneill int s;
920 1.1 jmcneill
921 1.1 jmcneill s = splnet();
922 1.1 jmcneill alc_stop(ifp, 0);
923 1.1 jmcneill splx(s);
924 1.1 jmcneill
925 1.1 jmcneill mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
926 1.1 jmcneill
927 1.1 jmcneill /* Delete all remaining media. */
928 1.1 jmcneill ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
929 1.1 jmcneill
930 1.1 jmcneill ether_ifdetach(ifp);
931 1.1 jmcneill if_detach(ifp);
932 1.1 jmcneill alc_dma_free(sc);
933 1.1 jmcneill
934 1.1 jmcneill alc_phy_down(sc);
935 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
936 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
937 1.1 jmcneill sc->sc_irq_handle = NULL;
938 1.1 jmcneill }
939 1.1 jmcneill if (sc->sc_mem_size) {
940 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
941 1.1 jmcneill sc->sc_mem_size = 0;
942 1.1 jmcneill }
943 1.1 jmcneill
944 1.1 jmcneill return (0);
945 1.1 jmcneill }
946 1.1 jmcneill
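/*
 * Allocate and map the Tx/Rx descriptor rings, the Rx return ring and
 * the CMB/SMB blocks, and create the per-buffer Tx/Rx DMA maps.
 */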
947 1.1 jmcneill static int
948 1.1 jmcneill alc_dma_alloc(struct alc_softc *sc)
949 1.1 jmcneill {
950 1.1 jmcneill struct alc_txdesc *txd;
951 1.1 jmcneill struct alc_rxdesc *rxd;
952 1.1 jmcneill int nsegs, error, i;
953 1.1 jmcneill
954 1.1 jmcneill /*
 955 1.1 jmcneill 	 * Create DMA structures for TX ring
956 1.1 jmcneill */
957 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
958 1.1 jmcneill ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
959 1.1 jmcneill if (error) {
960 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
961 1.1 jmcneill return (ENOBUFS);
962 1.1 jmcneill }
963 1.1 jmcneill
964 1.1 jmcneill /* Allocate DMA'able memory for TX ring */
965 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
966 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
967 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
968 1.1 jmcneill if (error) {
969 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Tx ring.\n",
970 1.1 jmcneill device_xname(sc->sc_dev));
971 1.1 jmcneill return error;
972 1.1 jmcneill }
973 1.1 jmcneill
974 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
975 1.1 jmcneill nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
976 1.1 jmcneill BUS_DMA_NOWAIT);
977 1.1 jmcneill if (error)
978 1.1 jmcneill return (ENOBUFS);
979 1.1 jmcneill
980 1.1 jmcneill /* Load the DMA map for Tx ring. */
981 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
982 1.1 jmcneill sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
983 1.1 jmcneill if (error) {
984 1.1 jmcneill printf("%s: could not load DMA'able memory for Tx ring.\n",
985 1.1 jmcneill device_xname(sc->sc_dev));
986 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
987 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
988 1.1 jmcneill return error;
989 1.1 jmcneill }
990 1.1 jmcneill
991 1.1 jmcneill sc->alc_rdata.alc_tx_ring_paddr =
992 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
993 1.1 jmcneill
994 1.1 jmcneill /*
 995 1.1 jmcneill 	 * Create DMA structures for RX ring
996 1.1 jmcneill */
997 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
998 1.1 jmcneill ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
999 1.1 jmcneill if (error)
1000 1.1 jmcneill return (ENOBUFS);
1001 1.1 jmcneill
1002 1.1 jmcneill /* Allocate DMA'able memory for RX ring */
1003 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1004 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1005 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1006 1.1 jmcneill if (error) {
1007 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1008 1.1 jmcneill device_xname(sc->sc_dev));
1009 1.1 jmcneill return error;
1010 1.1 jmcneill }
1011 1.1 jmcneill
1012 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1013 1.1 jmcneill nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
1014 1.1 jmcneill BUS_DMA_NOWAIT);
1015 1.1 jmcneill if (error)
1016 1.1 jmcneill return (ENOBUFS);
1017 1.1 jmcneill
1018 1.1 jmcneill /* Load the DMA map for Rx ring. */
1019 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1020 1.1 jmcneill sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1021 1.1 jmcneill if (error) {
1022 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx ring.\n",
1023 1.1 jmcneill device_xname(sc->sc_dev));
1024 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1025 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1026 1.1 jmcneill return error;
1027 1.1 jmcneill }
1028 1.1 jmcneill
1029 1.1 jmcneill sc->alc_rdata.alc_rx_ring_paddr =
1030 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1031 1.1 jmcneill
1032 1.1 jmcneill /*
1033 1.1 jmcneill 	 * Create DMA structures for RX return ring
1034 1.1 jmcneill */
1035 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1036 1.1 jmcneill ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1037 1.1 jmcneill if (error)
1038 1.1 jmcneill return (ENOBUFS);
1039 1.1 jmcneill
1040 1.1 jmcneill /* Allocate DMA'able memory for RX return ring */
1041 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1042 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1043 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1044 1.1 jmcneill if (error) {
1045 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx "
1046 1.1 jmcneill "return ring.\n", device_xname(sc->sc_dev));
1047 1.1 jmcneill return error;
1048 1.1 jmcneill }
1049 1.1 jmcneill
1050 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1051 1.1 jmcneill nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
1052 1.1 jmcneill BUS_DMA_NOWAIT);
1053 1.1 jmcneill if (error)
1054 1.1 jmcneill return (ENOBUFS);
1055 1.1 jmcneill
1056 1.1 jmcneill /* Load the DMA map for Rx return ring. */
1057 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1058 1.1 jmcneill sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1059 1.1 jmcneill if (error) {
1060 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx return ring."
1061 1.1 jmcneill "\n", device_xname(sc->sc_dev));
1062 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1063 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1064 1.1 jmcneill return error;
1065 1.1 jmcneill }
1066 1.1 jmcneill
1067 1.1 jmcneill sc->alc_rdata.alc_rr_ring_paddr =
1068 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1069 1.1 jmcneill
1070 1.1 jmcneill /*
1071 1.1 jmcneill 	 * Create DMA structures for CMB block
1072 1.1 jmcneill */
1073 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1074 1.1 jmcneill ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1075 1.1 jmcneill &sc->alc_cdata.alc_cmb_map);
1076 1.1 jmcneill if (error)
1077 1.1 jmcneill return (ENOBUFS);
1078 1.1 jmcneill
1079 1.1 jmcneill /* Allocate DMA'able memory for CMB block */
1080 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1081 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1082 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1083 1.1 jmcneill if (error) {
1084 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1085 1.1 jmcneill "CMB block\n", device_xname(sc->sc_dev));
1086 1.1 jmcneill return error;
1087 1.1 jmcneill }
1088 1.1 jmcneill
1089 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1090 1.1 jmcneill nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
1091 1.1 jmcneill BUS_DMA_NOWAIT);
1092 1.1 jmcneill if (error)
1093 1.1 jmcneill return (ENOBUFS);
1094 1.1 jmcneill
1095 1.1 jmcneill /* Load the DMA map for CMB block. */
1096 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1097 1.1 jmcneill sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1098 1.1 jmcneill BUS_DMA_WAITOK);
1099 1.1 jmcneill if (error) {
1100 1.1 jmcneill printf("%s: could not load DMA'able memory for CMB block\n",
1101 1.1 jmcneill device_xname(sc->sc_dev));
1102 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1103 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1104 1.1 jmcneill return error;
1105 1.1 jmcneill }
1106 1.1 jmcneill
1107 1.1 jmcneill sc->alc_rdata.alc_cmb_paddr =
1108 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1109 1.1 jmcneill
1110 1.1 jmcneill /*
1111 1.1 jmcneill 	 * Create DMA structures for SMB block
1112 1.1 jmcneill */
1113 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1114 1.1 jmcneill ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1115 1.1 jmcneill &sc->alc_cdata.alc_smb_map);
1116 1.1 jmcneill if (error)
1117 1.1 jmcneill return (ENOBUFS);
1118 1.1 jmcneill
1119 1.1 jmcneill /* Allocate DMA'able memory for SMB block */
1120 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1121 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1122 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1123 1.1 jmcneill if (error) {
1124 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1125 1.1 jmcneill "SMB block\n", device_xname(sc->sc_dev));
1126 1.1 jmcneill return error;
1127 1.1 jmcneill }
1128 1.1 jmcneill
1129 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1130 1.1 jmcneill nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
1131 1.1 jmcneill BUS_DMA_NOWAIT);
1132 1.1 jmcneill if (error)
1133 1.1 jmcneill return (ENOBUFS);
1134 1.1 jmcneill
1135 1.1 jmcneill /* Load the DMA map for SMB block */
1136 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1137 1.1 jmcneill sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1138 1.1 jmcneill BUS_DMA_WAITOK);
1139 1.1 jmcneill if (error) {
1140 1.1 jmcneill printf("%s: could not load DMA'able memory for SMB block\n",
1141 1.1 jmcneill device_xname(sc->sc_dev));
1142 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1143 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1144 1.1 jmcneill return error;
1145 1.1 jmcneill }
1146 1.1 jmcneill
1147 1.1 jmcneill sc->alc_rdata.alc_smb_paddr =
1148 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1149 1.1 jmcneill
1150 1.1 jmcneill
1151 1.1 jmcneill /* Create DMA maps for Tx buffers. */
1152 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1153 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1154 1.1 jmcneill txd->tx_m = NULL;
1155 1.1 jmcneill txd->tx_dmamap = NULL;
1156 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1157 1.1 jmcneill ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1158 1.1 jmcneill &txd->tx_dmamap);
1159 1.1 jmcneill if (error) {
1160 1.1 jmcneill printf("%s: could not create Tx dmamap.\n",
1161 1.1 jmcneill device_xname(sc->sc_dev));
1162 1.1 jmcneill return error;
1163 1.1 jmcneill }
1164 1.1 jmcneill }
1165 1.1 jmcneill
1166 1.1 jmcneill /* Create DMA maps for Rx buffers. */
1167 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1168 1.1 jmcneill BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1169 1.1 jmcneill if (error) {
1170 1.1 jmcneill printf("%s: could not create spare Rx dmamap.\n",
1171 1.1 jmcneill device_xname(sc->sc_dev));
1172 1.1 jmcneill return error;
1173 1.1 jmcneill }
1174 1.1 jmcneill
1175 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1176 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1177 1.1 jmcneill rxd->rx_m = NULL;
1178 1.1 jmcneill rxd->rx_dmamap = NULL;
1179 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1180 1.1 jmcneill MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1181 1.1 jmcneill if (error) {
1182 1.1 jmcneill printf("%s: could not create Rx dmamap.\n",
1183 1.1 jmcneill device_xname(sc->sc_dev));
1184 1.1 jmcneill return error;
1185 1.1 jmcneill }
1186 1.1 jmcneill }
1187 1.1 jmcneill
1188 1.1 jmcneill return (0);
1189 1.1 jmcneill }
1190 1.1 jmcneill
1191 1.1 jmcneill
1192 1.1 jmcneill static void
1193 1.1 jmcneill alc_dma_free(struct alc_softc *sc)
1194 1.1 jmcneill {
1195 1.1 jmcneill struct alc_txdesc *txd;
1196 1.1 jmcneill struct alc_rxdesc *rxd;
1197 1.1 jmcneill int i;
1198 1.1 jmcneill
1199 1.1 jmcneill /* Tx buffers */
1200 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1201 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1202 1.1 jmcneill if (txd->tx_dmamap != NULL) {
1203 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1204 1.1 jmcneill txd->tx_dmamap = NULL;
1205 1.1 jmcneill }
1206 1.1 jmcneill }
1207 1.1 jmcneill /* Rx buffers */
1208 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1209 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1210 1.1 jmcneill if (rxd->rx_dmamap != NULL) {
1211 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1212 1.1 jmcneill rxd->rx_dmamap = NULL;
1213 1.1 jmcneill }
1214 1.1 jmcneill }
1215 1.1 jmcneill if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1216 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1217 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = NULL;
1218 1.1 jmcneill }
1219 1.1 jmcneill
1220 1.1 jmcneill /* Tx ring. */
1221 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL)
1222 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1223 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1224 1.1 jmcneill sc->alc_rdata.alc_tx_ring != NULL)
1225 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1226 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
1227 1.1 jmcneill sc->alc_rdata.alc_tx_ring = NULL;
1228 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
1229 1.1 jmcneill
1230 1.1 jmcneill /* Rx ring. */
1231 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL)
1232 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1233 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1234 1.1 jmcneill sc->alc_rdata.alc_rx_ring != NULL)
1235 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1236 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1237 1.1 jmcneill sc->alc_rdata.alc_rx_ring = NULL;
1238 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map = NULL;
1239 1.1 jmcneill
1240 1.1 jmcneill /* Rx return ring. */
1241 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL)
1242 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1243 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1244 1.1 jmcneill sc->alc_rdata.alc_rr_ring != NULL)
1245 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1246 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1247 1.1 jmcneill sc->alc_rdata.alc_rr_ring = NULL;
1248 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map = NULL;
1249 1.1 jmcneill
1250 1.1 jmcneill /* CMB block */
1251 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL)
1252 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1253 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL &&
1254 1.1 jmcneill sc->alc_rdata.alc_cmb != NULL)
1255 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1256 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1257 1.1 jmcneill sc->alc_rdata.alc_cmb = NULL;
1258 1.1 jmcneill sc->alc_cdata.alc_cmb_map = NULL;
1259 1.1 jmcneill
1260 1.1 jmcneill /* SMB block */
1261 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL)
1262 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1263 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL &&
1264 1.1 jmcneill sc->alc_rdata.alc_smb != NULL)
1265 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1266 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1267 1.1 jmcneill sc->alc_rdata.alc_smb = NULL;
1268 1.1 jmcneill sc->alc_cdata.alc_smb_map = NULL;
1269 1.1 jmcneill }
1270 1.1 jmcneill
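/*
 * Load the mbuf chain at *m_head into the Tx descriptor ring. On a
 * descriptor shortage the map is unloaded and ENOBUFS is returned so
 * the caller can stop the queue and retry later.
 */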
1271 1.1 jmcneill static int
1272 1.1 jmcneill alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1273 1.1 jmcneill {
1274 1.1 jmcneill struct alc_txdesc *txd, *txd_last;
1275 1.1 jmcneill struct tx_desc *desc;
1276 1.1 jmcneill struct mbuf *m;
1277 1.1 jmcneill bus_dmamap_t map;
1278 1.1 jmcneill uint32_t cflags, poff, vtag;
1279 1.1 jmcneill int error, idx, nsegs, prod;
1280 1.1 jmcneill #if NVLAN > 0
1281 1.1 jmcneill struct m_tag *mtag;
1282 1.1 jmcneill #endif
1283 1.1 jmcneill
1284 1.1 jmcneill m = *m_head;
1285 1.1 jmcneill cflags = vtag = 0;
1286 1.1 jmcneill poff = 0;
1287 1.1 jmcneill
1288 1.1 jmcneill prod = sc->alc_cdata.alc_tx_prod;
1289 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1290 1.1 jmcneill txd_last = txd;
1291 1.1 jmcneill map = txd->tx_dmamap;
1292 1.1 jmcneill
1293 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1294 1.1 jmcneill
1295 1.1 jmcneill if (error == EFBIG) {
1296 1.1 jmcneill error = 0;
1297 1.1 jmcneill
1298 1.1 jmcneill *m_head = m_pullup(*m_head, MHLEN);
1299 1.1 jmcneill if (*m_head == NULL) {
1300 1.1 jmcneill printf("%s: can't defrag TX mbuf\n",
1301 1.1 jmcneill device_xname(sc->sc_dev));
1302 1.1 jmcneill return ENOBUFS;
1303 1.1 jmcneill }
1304 1.1 jmcneill
1305 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
1306 1.1 jmcneill BUS_DMA_NOWAIT);
1307 1.1 jmcneill
1308 1.1 jmcneill if (error != 0) {
1309 1.1 jmcneill printf("%s: could not load defragged TX mbuf\n",
1310 1.1 jmcneill device_xname(sc->sc_dev));
1311 1.1 jmcneill m_freem(*m_head);
1312 1.1 jmcneill *m_head = NULL;
1313 1.1 jmcneill return error;
1314 1.1 jmcneill }
1315 1.1 jmcneill } else if (error) {
1316 1.1 jmcneill printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1317 1.1 jmcneill return (error);
1318 1.1 jmcneill }
1319 1.1 jmcneill
1320 1.1 jmcneill nsegs = map->dm_nsegs;
1321 1.1 jmcneill
1322 1.1 jmcneill if (nsegs == 0) {
1323 1.1 jmcneill m_freem(*m_head);
1324 1.1 jmcneill *m_head = NULL;
1325 1.1 jmcneill return (EIO);
1326 1.1 jmcneill }
1327 1.1 jmcneill
1328 1.1 jmcneill /* Check descriptor overrun. */
1329 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1330 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, map);
1331 1.1 jmcneill return (ENOBUFS);
1332 1.1 jmcneill }
1333 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1334 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1335 1.1 jmcneill
1336 1.1 jmcneill m = *m_head;
1337 1.1 jmcneill desc = NULL;
1338 1.1 jmcneill idx = 0;
1339 1.1 jmcneill #if NVLAN > 0
1340 1.1 jmcneill /* Configure VLAN hardware tag insertion. */
1341 1.1 jmcneill if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
1342 1.1 jmcneill vtag = htons(VLAN_TAG_VALUE(mtag));
1343 1.1 jmcneill vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1344 1.1 jmcneill cflags |= TD_INS_VLAN_TAG;
1345 1.1 jmcneill }
1346 1.1 jmcneill #endif
1347 1.1 jmcneill /* Configure Tx checksum offload. */
1348 1.1 jmcneill if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1349 1.1 jmcneill cflags |= TD_CUSTOM_CSUM;
1350 1.1 jmcneill /* Set checksum start offset. */
1351 1.1 jmcneill cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1352 1.1 jmcneill TD_PLOAD_OFFSET_MASK;
1353 1.1 jmcneill }
1354 1.1 jmcneill for (; idx < nsegs; idx++) {
1355 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1356 1.1 jmcneill desc->len =
1357 1.1 jmcneill htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1358 1.1 jmcneill desc->flags = htole32(cflags);
1359 1.1 jmcneill desc->addr = htole64(map->dm_segs[idx].ds_addr);
1360 1.1 jmcneill sc->alc_cdata.alc_tx_cnt++;
1361 1.1 jmcneill ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1362 1.1 jmcneill }
1363 1.1 jmcneill /* Update producer index. */
1364 1.1 jmcneill sc->alc_cdata.alc_tx_prod = prod;
1365 1.1 jmcneill
1366 1.1 jmcneill /* Finally set EOP on the last descriptor. */
1367 1.1 jmcneill prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1368 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1369 1.1 jmcneill desc->flags |= htole32(TD_EOP);
1370 1.1 jmcneill
1371 1.1 jmcneill /* Swap dmamap of the first and the last. */
1372 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1373 1.1 jmcneill map = txd_last->tx_dmamap;
1374 1.1 jmcneill txd_last->tx_dmamap = txd->tx_dmamap;
1375 1.1 jmcneill txd->tx_dmamap = map;
1376 1.1 jmcneill txd->tx_m = m;
1377 1.1 jmcneill
1378 1.1 jmcneill return (0);
1379 1.1 jmcneill }
1380 1.1 jmcneill
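/* Drain the interface send queue into the Tx ring and kick the hardware. */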
1381 1.1 jmcneill static void
1382 1.1 jmcneill alc_start(struct ifnet *ifp)
1383 1.1 jmcneill {
1384 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1385 1.1 jmcneill struct mbuf *m_head;
1386 1.1 jmcneill int enq;
1387 1.1 jmcneill
1388 1.1 jmcneill if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1389 1.1 jmcneill return;
1390 1.1 jmcneill
1391 1.1 jmcneill /* Reclaim transmitted frames. */
1392 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
1393 1.1 jmcneill alc_txeof(sc);
1394 1.1 jmcneill
1395 1.1 jmcneill enq = 0;
1396 1.1 jmcneill for (;;) {
1397 1.1 jmcneill IFQ_DEQUEUE(&ifp->if_snd, m_head);
1398 1.1 jmcneill if (m_head == NULL)
1399 1.1 jmcneill break;
1400 1.1 jmcneill
1401 1.1 jmcneill /*
1402 1.1 jmcneill * Pack the data into the transmit ring. If we
1403 1.1 jmcneill * don't have room, set the OACTIVE flag and wait
1404 1.1 jmcneill * for the NIC to drain the ring.
1405 1.1 jmcneill */
1406 1.1 jmcneill if (alc_encap(sc, &m_head)) {
1407 1.1 jmcneill if (m_head == NULL)
1408 1.1 jmcneill break;
1409 1.1 jmcneill ifp->if_flags |= IFF_OACTIVE;
1410 1.1 jmcneill break;
1411 1.1 jmcneill }
1412 1.1 jmcneill enq = 1;
1413 1.1 jmcneill
1414 1.1 jmcneill /*
1415 1.1 jmcneill * If there's a BPF listener, bounce a copy of this frame
1416 1.1 jmcneill * to it.
1417 1.1 jmcneill */
1418 1.1 jmcneill bpf_mtap(ifp, m_head);
1419 1.1 jmcneill }
1420 1.1 jmcneill
1421 1.1 jmcneill if (enq) {
1422 1.1 jmcneill /* Sync descriptors. */
1423 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1424 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1425 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1426 1.1 jmcneill /* Kick. Assume we're using normal Tx priority queue. */
1427 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
1428 1.1 jmcneill (sc->alc_cdata.alc_tx_prod <<
1429 1.1 jmcneill MBOX_TD_PROD_LO_IDX_SHIFT) &
1430 1.1 jmcneill MBOX_TD_PROD_LO_IDX_MASK);
1431 1.1 jmcneill /* Set a timeout in case the chip goes out to lunch. */
1432 1.1 jmcneill ifp->if_timer = ALC_TX_TIMEOUT;
1433 1.1 jmcneill }
1434 1.1 jmcneill }
1435 1.1 jmcneill
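/* Transmit watchdog: reinitialize the chip when a Tx request times out. */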
1436 1.1 jmcneill static void
1437 1.1 jmcneill alc_watchdog(struct ifnet *ifp)
1438 1.1 jmcneill {
1439 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1440 1.1 jmcneill
1441 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1442 1.1 jmcneill printf("%s: watchdog timeout (missed link)\n",
1443 1.1 jmcneill device_xname(sc->sc_dev));
1444 1.1 jmcneill ifp->if_oerrors++;
1445 1.1 jmcneill alc_init(ifp);
1446 1.1 jmcneill return;
1447 1.1 jmcneill }
1448 1.1 jmcneill
1449 1.1 jmcneill printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1450 1.1 jmcneill ifp->if_oerrors++;
1451 1.1 jmcneill alc_init(ifp);
1452 1.1 jmcneill
1453 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1454 1.1 jmcneill alc_start(ifp);
1455 1.1 jmcneill }
1456 1.1 jmcneill
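/* Interface ioctl handler; reprogram the Rx filter on ENETRESET. */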
1457 1.1 jmcneill static int
1458 1.1 jmcneill alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1459 1.1 jmcneill {
1460 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1461 1.1 jmcneill int s, error = 0;
1462 1.1 jmcneill
1463 1.1 jmcneill s = splnet();
1464 1.1 jmcneill
1465 1.1 jmcneill error = ether_ioctl(ifp, cmd, data);
1466 1.1 jmcneill if (error == ENETRESET) {
1467 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING)
1468 1.1 jmcneill alc_iff(sc);
1469 1.1 jmcneill error = 0;
1470 1.1 jmcneill }
1471 1.1 jmcneill
1472 1.1 jmcneill splx(s);
1473 1.1 jmcneill return (error);
1474 1.1 jmcneill }
1475 1.1 jmcneill
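/*
 * Reprogram MAC_CFG with the speed, duplex and flow-control settings
 * resolved by the PHY.
 */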
1476 1.1 jmcneill static void
1477 1.1 jmcneill alc_mac_config(struct alc_softc *sc)
1478 1.1 jmcneill {
1479 1.1 jmcneill struct mii_data *mii;
1480 1.1 jmcneill uint32_t reg;
1481 1.1 jmcneill
1482 1.1 jmcneill mii = &sc->sc_miibus;
1483 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
1484 1.1 jmcneill reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
1485 1.1 jmcneill MAC_CFG_SPEED_MASK);
1486 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
1487 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
1488 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
1489 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
1490 1.1 jmcneill /* Reprogram MAC with resolved speed/duplex. */
1491 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
1492 1.1 jmcneill case IFM_10_T:
1493 1.1 jmcneill case IFM_100_TX:
1494 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
1495 1.1 jmcneill break;
1496 1.1 jmcneill case IFM_1000_T:
1497 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
1498 1.1 jmcneill break;
1499 1.1 jmcneill }
1500 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1501 1.1 jmcneill reg |= MAC_CFG_FULL_DUPLEX;
1502 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1503 1.1 jmcneill reg |= MAC_CFG_TX_FC;
1504 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1505 1.1 jmcneill reg |= MAC_CFG_RX_FC;
1506 1.1 jmcneill }
1507 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1508 1.1 jmcneill }
1509 1.1 jmcneill
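/*
 * Clear the hardware statistics: reset the SMB update flag, or on
 * controllers with the SMB bug simply read back the MIB counter registers.
 */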
1510 1.1 jmcneill static void
1511 1.1 jmcneill alc_stats_clear(struct alc_softc *sc)
1512 1.1 jmcneill {
1513 1.1 jmcneill struct smb sb, *smb;
1514 1.1 jmcneill uint32_t *reg;
1515 1.1 jmcneill int i;
1516 1.1 jmcneill
1517 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1518 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1519 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1520 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1521 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1522 1.1 jmcneill /* Update done, clear. */
1523 1.1 jmcneill smb->updated = 0;
1524 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1525 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1526 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1527 1.1 jmcneill } else {
1528 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1529 1.1 jmcneill reg++) {
1530 1.1 jmcneill CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1531 1.1 jmcneill i += sizeof(uint32_t);
1532 1.1 jmcneill }
1533 1.1 jmcneill /* Read Tx statistics. */
1534 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1535 1.1 jmcneill reg++) {
1536 1.1 jmcneill CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1537 1.1 jmcneill i += sizeof(uint32_t);
1538 1.1 jmcneill }
1539 1.1 jmcneill }
1540 1.1 jmcneill }
1541 1.1 jmcneill
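/*
 * Fold the hardware statistics (from the SMB block or the MIB registers)
 * into the softc counters and the ifnet statistics.
 */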
1542 1.1 jmcneill static void
1543 1.1 jmcneill alc_stats_update(struct alc_softc *sc)
1544 1.1 jmcneill {
1545 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1546 1.1 jmcneill struct alc_hw_stats *stat;
1547 1.1 jmcneill struct smb sb, *smb;
1548 1.1 jmcneill uint32_t *reg;
1549 1.1 jmcneill int i;
1550 1.1 jmcneill
1551 1.1 jmcneill stat = &sc->alc_stats;
1552 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1553 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1554 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1555 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1556 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1557 1.1 jmcneill if (smb->updated == 0)
1558 1.1 jmcneill return;
1559 1.1 jmcneill } else {
1560 1.1 jmcneill smb = &sb;
1561 1.1 jmcneill /* Read Rx statistics. */
1562 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1563 1.1 jmcneill reg++) {
1564 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1565 1.1 jmcneill i += sizeof(uint32_t);
1566 1.1 jmcneill }
1567 1.1 jmcneill /* Read Tx statistics. */
1568 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1569 1.1 jmcneill reg++) {
1570 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1571 1.1 jmcneill i += sizeof(uint32_t);
1572 1.1 jmcneill }
1573 1.1 jmcneill }
1574 1.1 jmcneill
1575 1.1 jmcneill /* Rx stats. */
1576 1.1 jmcneill stat->rx_frames += smb->rx_frames;
1577 1.1 jmcneill stat->rx_bcast_frames += smb->rx_bcast_frames;
1578 1.1 jmcneill stat->rx_mcast_frames += smb->rx_mcast_frames;
1579 1.1 jmcneill stat->rx_pause_frames += smb->rx_pause_frames;
1580 1.1 jmcneill stat->rx_control_frames += smb->rx_control_frames;
1581 1.1 jmcneill stat->rx_crcerrs += smb->rx_crcerrs;
1582 1.1 jmcneill stat->rx_lenerrs += smb->rx_lenerrs;
1583 1.1 jmcneill stat->rx_bytes += smb->rx_bytes;
1584 1.1 jmcneill stat->rx_runts += smb->rx_runts;
1585 1.1 jmcneill stat->rx_fragments += smb->rx_fragments;
1586 1.1 jmcneill stat->rx_pkts_64 += smb->rx_pkts_64;
1587 1.1 jmcneill stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1588 1.1 jmcneill stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1589 1.1 jmcneill stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1590 1.1 jmcneill stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1591 1.1 jmcneill stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1592 1.1 jmcneill stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1593 1.1 jmcneill stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1594 1.1 jmcneill stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1595 1.1 jmcneill stat->rx_rrs_errs += smb->rx_rrs_errs;
1596 1.1 jmcneill stat->rx_alignerrs += smb->rx_alignerrs;
1597 1.1 jmcneill stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1598 1.1 jmcneill stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1599 1.1 jmcneill stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1600 1.1 jmcneill
1601 1.1 jmcneill /* Tx stats. */
1602 1.1 jmcneill stat->tx_frames += smb->tx_frames;
1603 1.1 jmcneill stat->tx_bcast_frames += smb->tx_bcast_frames;
1604 1.1 jmcneill stat->tx_mcast_frames += smb->tx_mcast_frames;
1605 1.1 jmcneill stat->tx_pause_frames += smb->tx_pause_frames;
1606 1.1 jmcneill stat->tx_excess_defer += smb->tx_excess_defer;
1607 1.1 jmcneill stat->tx_control_frames += smb->tx_control_frames;
1608 1.1 jmcneill stat->tx_deferred += smb->tx_deferred;
1609 1.1 jmcneill stat->tx_bytes += smb->tx_bytes;
1610 1.1 jmcneill stat->tx_pkts_64 += smb->tx_pkts_64;
1611 1.1 jmcneill stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
1612 1.1 jmcneill stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
1613 1.1 jmcneill stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
1614 1.1 jmcneill stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
1615 1.1 jmcneill stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
1616 1.1 jmcneill stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
1617 1.1 jmcneill stat->tx_single_colls += smb->tx_single_colls;
1618 1.1 jmcneill stat->tx_multi_colls += smb->tx_multi_colls;
1619 1.1 jmcneill stat->tx_late_colls += smb->tx_late_colls;
1620 1.1 jmcneill stat->tx_excess_colls += smb->tx_excess_colls;
1621 1.1 jmcneill stat->tx_abort += smb->tx_abort;
1622 1.1 jmcneill stat->tx_underrun += smb->tx_underrun;
1623 1.1 jmcneill stat->tx_desc_underrun += smb->tx_desc_underrun;
1624 1.1 jmcneill stat->tx_lenerrs += smb->tx_lenerrs;
1625 1.1 jmcneill stat->tx_pkts_truncated += smb->tx_pkts_truncated;
1626 1.1 jmcneill stat->tx_bcast_bytes += smb->tx_bcast_bytes;
1627 1.1 jmcneill stat->tx_mcast_bytes += smb->tx_mcast_bytes;
1628 1.1 jmcneill
1629 1.1 jmcneill /* Update counters in ifnet. */
1630 1.1 jmcneill ifp->if_opackets += smb->tx_frames;
1631 1.1 jmcneill
1632 1.1 jmcneill ifp->if_collisions += smb->tx_single_colls +
1633 1.1 jmcneill smb->tx_multi_colls * 2 + smb->tx_late_colls +
1634 1.1 jmcneill smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
1635 1.1 jmcneill
1636 1.1 jmcneill /*
1637 1.1 jmcneill * XXX
1638 1.1 jmcneill * The tx_pkts_truncated counter looks suspicious. It constantly
1639 1.1 jmcneill * increments with no sign of Tx errors. This may indicate that
1640 1.1 jmcneill * the counter name is not the correct one, so it is not included
1641 1.1 jmcneill * in the output error count.
1642 1.1 jmcneill */
1643 1.1 jmcneill ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
1644 1.1 jmcneill smb->tx_underrun;
1645 1.1 jmcneill
1646 1.1 jmcneill ifp->if_ipackets += smb->rx_frames;
1647 1.1 jmcneill
1648 1.1 jmcneill ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
1649 1.1 jmcneill smb->rx_runts + smb->rx_pkts_truncated +
1650 1.1 jmcneill smb->rx_fifo_oflows + smb->rx_rrs_errs +
1651 1.1 jmcneill smb->rx_alignerrs;
1652 1.1 jmcneill
1653 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1654 1.1 jmcneill /* Update done, clear. */
1655 1.1 jmcneill smb->updated = 0;
1656 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1657 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1658 1.1 jmcneill }
1659 1.1 jmcneill }
1660 1.1 jmcneill
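/*
 * Interrupt handler: acknowledge and mask the interrupt, service Rx and Tx
 * completions, and reinitialize the chip on DMA or TxQ error conditions.
 */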
1661 1.1 jmcneill static int
1662 1.1 jmcneill alc_intr(void *arg)
1663 1.1 jmcneill {
1664 1.1 jmcneill struct alc_softc *sc = arg;
1665 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1666 1.1 jmcneill uint32_t status;
1667 1.1 jmcneill
1668 1.1 jmcneill status = CSR_READ_4(sc, ALC_INTR_STATUS);
1669 1.1 jmcneill if ((status & ALC_INTRS) == 0)
1670 1.1 jmcneill return (0);
1671 1.1 jmcneill
1672 1.1 jmcneill /* Acknowledge and disable interrupts. */
1673 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
1674 1.1 jmcneill
1675 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING) {
1676 1.1 jmcneill if (status & INTR_RX_PKT) {
1677 1.1 jmcneill int error;
1678 1.1 jmcneill
1679 1.1 jmcneill error = alc_rxintr(sc);
1680 1.1 jmcneill if (error) {
1681 1.1 jmcneill alc_init(ifp);
1682 1.1 jmcneill return (0);
1683 1.1 jmcneill }
1684 1.1 jmcneill }
1685 1.1 jmcneill
1686 1.1 jmcneill if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
1687 1.1 jmcneill INTR_TXQ_TO_RST)) {
1688 1.1 jmcneill if (status & INTR_DMA_RD_TO_RST)
1689 1.1 jmcneill printf("%s: DMA read error! -- resetting\n",
1690 1.1 jmcneill device_xname(sc->sc_dev));
1691 1.1 jmcneill if (status & INTR_DMA_WR_TO_RST)
1692 1.1 jmcneill printf("%s: DMA write error! -- resetting\n",
1693 1.1 jmcneill device_xname(sc->sc_dev));
1694 1.1 jmcneill if (status & INTR_TXQ_TO_RST)
1695 1.1 jmcneill printf("%s: TxQ reset! -- resetting\n",
1696 1.1 jmcneill device_xname(sc->sc_dev));
1697 1.1 jmcneill alc_init(ifp);
1698 1.1 jmcneill return (0);
1699 1.1 jmcneill }
1700 1.1 jmcneill
1701 1.1 jmcneill alc_txeof(sc);
1702 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1703 1.1 jmcneill alc_start(ifp);
1704 1.1 jmcneill }
1705 1.1 jmcneill
1706 1.1 jmcneill /* Re-enable interrupts. */
1707 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
1708 1.1 jmcneill return (1);
1709 1.1 jmcneill }
1710 1.1 jmcneill
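/*
 * Tx completion: walk the ring up to the hardware consumer index and
 * reclaim descriptors and transmitted mbufs.
 */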
1711 1.1 jmcneill static void
1712 1.1 jmcneill alc_txeof(struct alc_softc *sc)
1713 1.1 jmcneill {
1714 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1715 1.1 jmcneill struct alc_txdesc *txd;
1716 1.1 jmcneill uint32_t cons, prod;
1717 1.1 jmcneill int prog;
1718 1.1 jmcneill
1719 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1720 1.1 jmcneill return;
1721 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1722 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1723 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1724 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
1725 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1726 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize,
1727 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1728 1.1 jmcneill prod = sc->alc_rdata.alc_cmb->cons;
1729 1.1 jmcneill } else
1730 1.1 jmcneill prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
1731 1.1 jmcneill /* Assume we're using normal Tx priority queue. */
1732 1.1 jmcneill prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
1733 1.1 jmcneill MBOX_TD_CONS_LO_IDX_SHIFT;
1734 1.1 jmcneill cons = sc->alc_cdata.alc_tx_cons;
1735 1.1 jmcneill /*
1736 1.1 jmcneill * Go through our Tx list and free mbufs for those
1737 1.1 jmcneill * frames which have been transmitted.
1738 1.1 jmcneill */
1739 1.1 jmcneill for (prog = 0; cons != prod; prog++,
1740 1.1 jmcneill ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
1741 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt <= 0)
1742 1.1 jmcneill break;
1744 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
1745 1.1 jmcneill sc->alc_cdata.alc_tx_cnt--;
1746 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[cons];
1747 1.1 jmcneill if (txd->tx_m != NULL) {
1748 1.1 jmcneill /* Reclaim transmitted mbufs. */
1749 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1750 1.1 jmcneill m_freem(txd->tx_m);
1751 1.1 jmcneill txd->tx_m = NULL;
1752 1.1 jmcneill }
1753 1.1 jmcneill }
1754 1.1 jmcneill
1755 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
1756 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1757 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1758 1.1 jmcneill sc->alc_cdata.alc_tx_cons = cons;
1759 1.1 jmcneill /*
1760 1.1 jmcneill * Unarm the watchdog timer only when there are no pending
1761 1.1 jmcneill * frames in the Tx queue.
1762 1.1 jmcneill */
1763 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1764 1.1 jmcneill ifp->if_timer = 0;
1765 1.1 jmcneill }
1766 1.1 jmcneill
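/*
 * Attach a fresh mbuf cluster to an Rx descriptor. The cluster is loaded
 * into the spare DMA map first and the maps are swapped on success, so a
 * failure never leaves the descriptor without a buffer.
 */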
1767 1.1 jmcneill static int
1768 1.1 jmcneill alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, int init)
1769 1.1 jmcneill {
1770 1.1 jmcneill struct mbuf *m;
1771 1.1 jmcneill bus_dmamap_t map;
1772 1.1 jmcneill int error;
1773 1.1 jmcneill
1774 1.1 jmcneill MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
1775 1.1 jmcneill if (m == NULL)
1776 1.1 jmcneill return (ENOBUFS);
1777 1.1 jmcneill MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
1778 1.1 jmcneill if (!(m->m_flags & M_EXT)) {
1779 1.1 jmcneill m_freem(m);
1780 1.1 jmcneill return (ENOBUFS);
1781 1.1 jmcneill }
1782 1.1 jmcneill
1783 1.1 jmcneill m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
1784 1.1 jmcneill
1785 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat,
1786 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
1787 1.1 jmcneill
1788 1.1 jmcneill if (error != 0) {
1789 1.1 jmcneill /*
1790 1.1 jmcneill * The spare map was created with a single DMA segment, so
1791 1.1 jmcneill * bus_dmamap_load_mbuf() itself returns EFBIG when the
1792 1.1 jmcneill * cluster does not fit in one segment; no separate segment
1793 1.1 jmcneill * count check is needed here.
1794 1.1 jmcneill */
1795 1.1 jmcneill m_freem(m);
1796 1.1 jmcneill
1797 1.1 jmcneill if (init)
1798 1.1 jmcneill printf("%s: can't load RX mbuf\n",
1799 1.1 jmcneill device_xname(sc->sc_dev));
1800 1.1 jmcneill
1801 1.1 jmcneill return (error);
1802 1.1 jmcneill }
1803 1.1 jmcneill
1804 1.1 jmcneill if (rxd->rx_m != NULL) {
1805 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1806 1.1 jmcneill rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1807 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1808 1.1 jmcneill }
1809 1.1 jmcneill map = rxd->rx_dmamap;
1810 1.1 jmcneill rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
1811 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = map;
1812 1.1 jmcneill rxd->rx_m = m;
1813 1.1 jmcneill rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
1814 1.1 jmcneill return (0);
1815 1.1 jmcneill }
1816 1.1 jmcneill
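/*
 * Process the Rx return ring: hand each completed frame to alc_rxeof()
 * and give the recycled buffers back to the controller.
 */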
1817 1.1 jmcneill static int
1818 1.1 jmcneill alc_rxintr(struct alc_softc *sc)
1819 1.1 jmcneill {
1820 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1821 1.1 jmcneill struct rx_rdesc *rrd;
1822 1.1 jmcneill uint32_t nsegs, status;
1823 1.1 jmcneill int rr_cons, prog;
1824 1.1 jmcneill
1825 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1826 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1827 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1828 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1829 1.1 jmcneill rr_cons = sc->alc_cdata.alc_rr_cons;
1830 1.1 jmcneill for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
1831 1.1 jmcneill rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
1832 1.1 jmcneill status = le32toh(rrd->status);
1833 1.1 jmcneill if ((status & RRD_VALID) == 0)
1834 1.1 jmcneill break;
1835 1.1 jmcneill nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
1836 1.1 jmcneill if (nsegs == 0) {
1837 1.1 jmcneill /* This should not happen! */
1838 1.1 jmcneill if (alcdebug)
1839 1.1 jmcneill printf("%s: unexpected segment count -- "
1840 1.1 jmcneill "resetting\n", device_xname(sc->sc_dev));
1841 1.1 jmcneill return (EIO);
1842 1.1 jmcneill }
1843 1.1 jmcneill alc_rxeof(sc, rrd);
1844 1.1 jmcneill /* Clear Rx return status. */
1845 1.1 jmcneill rrd->status = 0;
1846 1.1 jmcneill ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
1847 1.1 jmcneill sc->alc_cdata.alc_rx_cons += nsegs;
1848 1.1 jmcneill sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
1849 1.1 jmcneill prog += nsegs;
1850 1.1 jmcneill }
1851 1.1 jmcneill
1852 1.1 jmcneill if (prog > 0) {
1853 1.1 jmcneill /* Update the consumer index. */
1854 1.1 jmcneill sc->alc_cdata.alc_rr_cons = rr_cons;
1855 1.1 jmcneill /* Sync Rx return descriptors. */
1856 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1857 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
1858 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1859 1.1 jmcneill /*
1860 1.1 jmcneill * Sync the updated Rx descriptors so that the controller sees
1861 1.1 jmcneill * the modified buffer addresses.
1862 1.1 jmcneill */
1863 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1864 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
1865 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1866 1.1 jmcneill /*
1867 1.1 jmcneill * Let the controller know about the availability of new Rx
1868 1.1 jmcneill * buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT
1869 1.1 jmcneill * descriptors, it may be possible to update
1870 1.1 jmcneill * ALC_MBOX_RD0_PROD_IDX only when Rx buffer pre-fetching is
1871 1.1 jmcneill * required. In addition, we have already set
1872 1.1 jmcneill * ALC_RX_RD_FREE_THRESH to RX_RD_FREE_THRESH_LO_DEFAULT
1873 1.1 jmcneill * descriptors. However, it still seems that pre-fetching
1874 1.1 jmcneill * needs more experimentation.
1875 1.1 jmcneill */
1876 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
1877 1.1 jmcneill sc->alc_cdata.alc_rx_cons);
1878 1.1 jmcneill }
1879 1.1 jmcneill
1880 1.1 jmcneill return (0);
1881 1.1 jmcneill }
1882 1.1 jmcneill
1883 1.1 jmcneill /* Receive a frame. */
1884 1.1 jmcneill static void
1885 1.1 jmcneill alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
1886 1.1 jmcneill {
1887 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1888 1.1 jmcneill struct alc_rxdesc *rxd;
1889 1.1 jmcneill struct mbuf *mp, *m;
1890 1.1 jmcneill uint32_t rdinfo, status;
1891 1.1 jmcneill int count, nsegs, rx_cons;
1892 1.1 jmcneill
1893 1.1 jmcneill status = le32toh(rrd->status);
1894 1.1 jmcneill rdinfo = le32toh(rrd->rdinfo);
1895 1.1 jmcneill rx_cons = RRD_RD_IDX(rdinfo);
1896 1.1 jmcneill nsegs = RRD_RD_CNT(rdinfo);
1897 1.1 jmcneill
1898 1.1 jmcneill sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
1899 1.1 jmcneill if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
1900 1.1 jmcneill /*
1901 1.1 jmcneill * We want to pass the following frames to the upper
1902 1.1 jmcneill * layer regardless of the error status of the Rx return
1903 1.1 jmcneill * ring:
1904 1.1 jmcneill *
1905 1.1 jmcneill * o IP/TCP/UDP checksum is bad.
1906 1.1 jmcneill * o frame length and protocol-specific length
1907 1.1 jmcneill * do not match.
1908 1.1 jmcneill *
1909 1.1 jmcneill * Force the network stack to compute checksums for
1910 1.1 jmcneill * errored frames.
1911 1.1 jmcneill */
1912 1.1 jmcneill status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
1913 1.2 jmcneill if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
1914 1.2 jmcneill RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
1915 1.1 jmcneill return;
1916 1.1 jmcneill }
1917 1.1 jmcneill
1918 1.1 jmcneill for (count = 0; count < nsegs; count++,
1919 1.1 jmcneill ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
1920 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
1921 1.1 jmcneill mp = rxd->rx_m;
1922 1.1 jmcneill /* Add a new receive buffer to the ring. */
1923 1.1 jmcneill if (alc_newbuf(sc, rxd, 0) != 0) {
1924 1.1 jmcneill ifp->if_iqdrops++;
1925 1.1 jmcneill /* Reuse Rx buffers. */
1926 1.1 jmcneill if (sc->alc_cdata.alc_rxhead != NULL)
1927 1.1 jmcneill m_freem(sc->alc_cdata.alc_rxhead);
1928 1.1 jmcneill break;
1929 1.1 jmcneill }
1930 1.1 jmcneill
1931 1.1 jmcneill /*
1932 1.1 jmcneill * Assume we've received a full-sized frame.
1933 1.1 jmcneill * The actual size is fixed up when we encounter the end of
1934 1.1 jmcneill * a multi-segment frame.
1935 1.1 jmcneill */
1936 1.1 jmcneill mp->m_len = sc->alc_buf_size;
1937 1.1 jmcneill
1938 1.1 jmcneill /* Chain received mbufs. */
1939 1.1 jmcneill if (sc->alc_cdata.alc_rxhead == NULL) {
1940 1.1 jmcneill sc->alc_cdata.alc_rxhead = mp;
1941 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1942 1.1 jmcneill } else {
1943 1.1 jmcneill mp->m_flags &= ~M_PKTHDR;
1944 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail =
1945 1.1 jmcneill sc->alc_cdata.alc_rxtail;
1946 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = mp;
1947 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1948 1.1 jmcneill }
1949 1.1 jmcneill
1950 1.1 jmcneill if (count == nsegs - 1) {
1951 1.1 jmcneill /* Last desc. for this frame. */
1952 1.1 jmcneill m = sc->alc_cdata.alc_rxhead;
1953 1.1 jmcneill m->m_flags |= M_PKTHDR;
1954 1.1 jmcneill /*
1955 1.1 jmcneill * It seems that the L1C/L2C controller has no way
1956 1.1 jmcneill * to tell the hardware to strip the CRC bytes.
1957 1.1 jmcneill */
1958 1.1 jmcneill m->m_pkthdr.len =
1959 1.1 jmcneill sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
1960 1.1 jmcneill if (nsegs > 1) {
1961 1.1 jmcneill /* Set last mbuf size. */
1962 1.1 jmcneill mp->m_len = sc->alc_cdata.alc_rxlen -
1963 1.1 jmcneill (nsegs - 1) * sc->alc_buf_size;
1964 1.1 jmcneill /* Remove the CRC bytes in chained mbufs. */
1965 1.1 jmcneill if (mp->m_len <= ETHER_CRC_LEN) {
1966 1.1 jmcneill sc->alc_cdata.alc_rxtail =
1967 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail;
1968 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_len -=
1969 1.1 jmcneill (ETHER_CRC_LEN - mp->m_len);
1970 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = NULL;
1971 1.1 jmcneill m_freem(mp);
1972 1.1 jmcneill } else {
1973 1.1 jmcneill mp->m_len -= ETHER_CRC_LEN;
1974 1.1 jmcneill }
1975 1.1 jmcneill } else
1976 1.1 jmcneill m->m_len = m->m_pkthdr.len;
1977 1.1 jmcneill m->m_pkthdr.rcvif = ifp;
1978 1.1 jmcneill #if NVLAN > 0
1979 1.1 jmcneill /*
1980 1.1 jmcneill * Due to hardware bugs, Rx checksum offloading
1981 1.1 jmcneill * was intentionally disabled.
1982 1.1 jmcneill */
1983 1.1 jmcneill if (status & RRD_VLAN_TAG) {
1984 1.1 jmcneill u_int32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
1985 1.1 jmcneill VLAN_INPUT_TAG(ifp, m, ntohs(vtag), );
1986 1.1 jmcneill }
1987 1.1 jmcneill #endif
1988 1.1 jmcneill
1989 1.1 jmcneill bpf_mtap(ifp, m);
1990 1.1 jmcneill
1991 1.1 jmcneill {
1992 1.1 jmcneill /* Pass it on. */
1993 1.1 jmcneill ether_input(ifp, m);
1994 1.1 jmcneill }
1995 1.1 jmcneill }
1996 1.1 jmcneill }
1997 1.1 jmcneill /* Reset mbuf chains. */
1998 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
1999 1.1 jmcneill }
2000 1.1 jmcneill
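/* Once-a-second callout: drive the MII state machine and update statistics. */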
2001 1.1 jmcneill static void
2002 1.1 jmcneill alc_tick(void *xsc)
2003 1.1 jmcneill {
2004 1.1 jmcneill struct alc_softc *sc = xsc;
2005 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
2006 1.1 jmcneill int s;
2007 1.1 jmcneill
2008 1.1 jmcneill s = splnet();
2009 1.1 jmcneill mii_tick(mii);
2010 1.1 jmcneill alc_stats_update(sc);
2011 1.1 jmcneill splx(s);
2012 1.1 jmcneill
2013 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2014 1.1 jmcneill }
2015 1.1 jmcneill
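/* Issue a master reset and wait for the controller to become idle. */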
2016 1.1 jmcneill static void
2017 1.1 jmcneill alc_reset(struct alc_softc *sc)
2018 1.1 jmcneill {
2019 1.1 jmcneill uint32_t reg;
2020 1.1 jmcneill int i;
2021 1.1 jmcneill
2022 1.2 jmcneill reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2023 1.2 jmcneill reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2024 1.2 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2025 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2026 1.1 jmcneill DELAY(10);
2027 1.1 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2028 1.1 jmcneill break;
2029 1.1 jmcneill }
2030 1.1 jmcneill if (i == 0)
2031 1.1 jmcneill printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
2032 1.1 jmcneill
2033 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2034 1.1 jmcneill if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2035 1.1 jmcneill break;
2036 1.1 jmcneill DELAY(10);
2037 1.1 jmcneill }
2038 1.1 jmcneill
2039 1.1 jmcneill if (i == 0)
2040 1.1 jmcneill printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
2041 1.1 jmcneill reg);
2042 1.1 jmcneill }
2043 1.1 jmcneill
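/*
 * Bring the interface up: reset the chip, initialize the descriptor rings,
 * and program the DMA, queue, MAC and interrupt moderation registers.
 */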
2044 1.1 jmcneill static int
2045 1.1 jmcneill alc_init(struct ifnet *ifp)
2046 1.1 jmcneill {
2047 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
2048 1.1 jmcneill struct mii_data *mii;
2049 1.1 jmcneill uint8_t eaddr[ETHER_ADDR_LEN];
2050 1.1 jmcneill bus_addr_t paddr;
2051 1.1 jmcneill uint32_t reg, rxf_hi, rxf_lo;
2052 1.1 jmcneill int error;
2053 1.1 jmcneill
2054 1.1 jmcneill /*
2055 1.1 jmcneill * Cancel any pending I/O.
2056 1.1 jmcneill */
2057 1.1 jmcneill alc_stop(ifp, 0);
2058 1.1 jmcneill /*
2059 1.1 jmcneill * Reset the chip to a known state.
2060 1.1 jmcneill */
2061 1.1 jmcneill alc_reset(sc);
2062 1.1 jmcneill
2063 1.1 jmcneill /* Initialize Rx descriptors. */
2064 1.1 jmcneill error = alc_init_rx_ring(sc);
2065 1.1 jmcneill if (error != 0) {
2066 1.1 jmcneill printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2067 1.1 jmcneill alc_stop(ifp, 0);
2068 1.1 jmcneill return (error);
2069 1.1 jmcneill }
2070 1.1 jmcneill alc_init_rr_ring(sc);
2071 1.1 jmcneill alc_init_tx_ring(sc);
2072 1.1 jmcneill alc_init_cmb(sc);
2073 1.1 jmcneill alc_init_smb(sc);
2074 1.1 jmcneill
2075 1.2 jmcneill /* Enable all clocks. */
2076 1.2 jmcneill CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2077 1.2 jmcneill
2078 1.1 jmcneill /* Reprogram the station address. */
2079 1.1 jmcneill memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2080 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR0,
2081 1.1 jmcneill eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2082 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2083 1.1 jmcneill /*
2084 1.1 jmcneill * Clear the WOL status and disable all WOL features, as WOL
2085 1.1 jmcneill * would interfere with Rx operation under normal conditions.
2086 1.1 jmcneill */
2087 1.1 jmcneill CSR_READ_4(sc, ALC_WOL_CFG);
2088 1.1 jmcneill CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2089 1.1 jmcneill /* Set Tx descriptor base addresses. */
2090 1.1 jmcneill paddr = sc->alc_rdata.alc_tx_ring_paddr;
2091 1.1 jmcneill CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2092 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2093 1.1 jmcneill /* We don't use high priority ring. */
2094 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2095 1.1 jmcneill /* Set Tx descriptor counter. */
2096 1.1 jmcneill CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2097 1.1 jmcneill (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2098 1.1 jmcneill /* Set Rx descriptor base addresses. */
2099 1.1 jmcneill paddr = sc->alc_rdata.alc_rx_ring_paddr;
2100 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2101 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2102 1.1 jmcneill /* We use one Rx ring. */
2103 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2104 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2105 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2106 1.1 jmcneill /* Set Rx descriptor counter. */
2107 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2108 1.1 jmcneill (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2109 1.1 jmcneill
2110 1.1 jmcneill /*
2111 1.1 jmcneill * Let the hardware split jumbo frames into alc_max_buf_sized chunks
2112 1.1 jmcneill * if they do not fit the buffer size. The Rx return descriptor holds
2113 1.1 jmcneill * a counter that indicates how many fragments were made by the
2114 1.1 jmcneill * hardware. The buffer size should be a multiple of 8 bytes.
2115 1.1 jmcneill * Since the hardware has a limit on the buffer size, always
2116 1.1 jmcneill * use the maximum value.
2117 1.1 jmcneill * For strict-alignment architectures, make sure to reduce the buffer
2118 1.1 jmcneill * size by 8 bytes to make room for the alignment fixup.
2119 1.1 jmcneill */
2120 1.1 jmcneill sc->alc_buf_size = RX_BUF_SIZE_MAX;
2121 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2122 1.1 jmcneill
2123 1.1 jmcneill paddr = sc->alc_rdata.alc_rr_ring_paddr;
2124 1.1 jmcneill /* Set Rx return descriptor base addresses. */
2125 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2126 1.1 jmcneill /* We use one Rx return ring. */
2127 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2128 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2129 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2130 1.1 jmcneill /* Set Rx return descriptor counter. */
2131 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2132 1.1 jmcneill (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2133 1.1 jmcneill paddr = sc->alc_rdata.alc_cmb_paddr;
2134 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2135 1.1 jmcneill paddr = sc->alc_rdata.alc_smb_paddr;
2136 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2137 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2138 1.1 jmcneill
2139 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2140 1.2 jmcneill /* Reconfigure SRAM - Vendor magic. */
2141 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2142 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2143 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2144 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2145 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2146 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2147 1.2 jmcneill CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2148 1.2 jmcneill CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2149 1.2 jmcneill }
2150 1.2 jmcneill
2151 1.1 jmcneill /* Tell hardware that we're ready to load DMA blocks. */
2152 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2153 1.1 jmcneill
2154 1.1 jmcneill /* Configure interrupt moderation timer. */
2155 1.1 jmcneill sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2156 1.1 jmcneill sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2157 1.1 jmcneill reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2158 1.1 jmcneill reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2159 1.1 jmcneill CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2160 1.1 jmcneill /*
2161 1.1 jmcneill * We don't want automatic interrupt clearing, as the task queue
2162 1.1 jmcneill * for the interrupt should see the interrupt status.
2163 1.1 jmcneill */
2164 1.2 jmcneill reg = MASTER_SA_TIMER_ENB;
2165 1.1 jmcneill if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2166 1.1 jmcneill reg |= MASTER_IM_RX_TIMER_ENB;
2167 1.1 jmcneill if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2168 1.1 jmcneill reg |= MASTER_IM_TX_TIMER_ENB;
2169 1.1 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2170 1.1 jmcneill /*
2171 1.1 jmcneill * Disable interrupt re-trigger timer. We don't want automatic
2172 1.1 jmcneill * re-triggering of un-ACKed interrupts.
2173 1.1 jmcneill */
2174 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2175 1.1 jmcneill /* Configure CMB. */
2176 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2177 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2178 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2179 1.1 jmcneill else
2180 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2181 1.1 jmcneill /*
2182 1.1 jmcneill * The hardware can be configured to issue an SMB interrupt based
2183 1.1 jmcneill * on a programmed interval. Since the driver already has a callout
2184 1.1 jmcneill * that runs once a second, we use that instead of relying on the
2185 1.1 jmcneill * periodic SMB interrupt.
2186 1.1 jmcneill */
2187 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2188 1.1 jmcneill /* Clear MAC statistics. */
2189 1.1 jmcneill alc_stats_clear(sc);
2190 1.1 jmcneill
2191 1.1 jmcneill /*
2192 1.1 jmcneill * Always use the maximum frame size the controller can support.
2193 1.1 jmcneill * Otherwise, received frames with a larger frame length
2194 1.1 jmcneill * than the alc(4) MTU would be silently dropped in hardware. This
2195 1.1 jmcneill * would make path-MTU discovery hard as the sender wouldn't get
2196 1.1 jmcneill * any response from the receiver. alc(4) supports
2197 1.1 jmcneill * multi-fragment frames on the Rx path, so it has no problem
2198 1.1 jmcneill * assembling fragmented frames. Using the maximum frame size also
2199 1.1 jmcneill * removes the need to reinitialize the hardware when the interface
2200 1.1 jmcneill * MTU configuration is changed.
2201 1.1 jmcneill *
2202 1.1 jmcneill * Be conservative in what you do, be liberal in what you
2203 1.1 jmcneill * accept from others - RFC 793.
2204 1.1 jmcneill */
2205 1.2 jmcneill CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2206 1.1 jmcneill
2207 1.1 jmcneill /* Disable header split(?) */
2208 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2209 1.1 jmcneill
2210 1.1 jmcneill /* Configure IPG/IFG parameters. */
2211 1.1 jmcneill CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2212 1.1 jmcneill ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
2213 1.1 jmcneill ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2214 1.1 jmcneill ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2215 1.1 jmcneill ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
2216 1.1 jmcneill /* Set parameters for half-duplex media. */
2217 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDPX_CFG,
2218 1.1 jmcneill ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2219 1.1 jmcneill HDPX_CFG_LCOL_MASK) |
2220 1.1 jmcneill ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2221 1.1 jmcneill HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2222 1.1 jmcneill ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2223 1.1 jmcneill HDPX_CFG_ABEBT_MASK) |
2224 1.1 jmcneill ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2225 1.1 jmcneill HDPX_CFG_JAMIPG_MASK));
2226 1.1 jmcneill /*
2227 1.1 jmcneill * Set the TSO/checksum offload threshold. For frames that are
2228 1.1 jmcneill * larger than this threshold, the hardware will not do
2229 1.1 jmcneill * TSO/checksum offloading.
2230 1.1 jmcneill */
2231 1.1 jmcneill CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
2232 1.2 jmcneill (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2233 1.1 jmcneill TSO_OFFLOAD_THRESH_MASK);
2234 1.1 jmcneill /* Configure TxQ. */
2235 1.1 jmcneill reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2236 1.1 jmcneill TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
2237 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2238 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2239 1.2 jmcneill reg >>= 1;
2240 1.1 jmcneill reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2241 1.1 jmcneill TXQ_CFG_TD_BURST_MASK;
2242 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2243 1.1 jmcneill
2244 1.1 jmcneill /* Configure Rx free descriptor pre-fetching. */
2245 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2246 1.1 jmcneill ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
2247 1.1 jmcneill RX_RD_FREE_THRESH_HI_MASK) |
2248 1.1 jmcneill ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
2249 1.1 jmcneill RX_RD_FREE_THRESH_LO_MASK));
2250 1.1 jmcneill
2251 1.1 jmcneill /*
2252 1.1 jmcneill * Configure flow control parameters.
2253 1.1 jmcneill * XON : 80% of Rx FIFO
2254 1.1 jmcneill * XOFF : 30% of Rx FIFO
2255 1.1 jmcneill */
2256 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
2257 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) {
2258 1.2 jmcneill reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2259 1.2 jmcneill rxf_hi = (reg * 8) / 10;
2260 1.2 jmcneill rxf_lo = (reg * 3) / 10;
2261 1.2 jmcneill CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2262 1.2 jmcneill ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2263 1.2 jmcneill RX_FIFO_PAUSE_THRESH_LO_MASK) |
2264 1.2 jmcneill ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2265 1.2 jmcneill RX_FIFO_PAUSE_THRESH_HI_MASK));
2266 1.2 jmcneill }
2267 1.2 jmcneill
2268 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2269 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2270 1.2 jmcneill CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2271 1.2 jmcneill CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2272 1.2 jmcneill SERDES_PHY_CLK_SLOWDOWN);
2273 1.1 jmcneill
2274 1.1 jmcneill /* Disable RSS until I understand L1C/L2C's RSS logic. */
2275 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2276 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2277 1.1 jmcneill
2278 1.1 jmcneill /* Configure RxQ. */
2279 1.1 jmcneill reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2280 1.1 jmcneill RXQ_CFG_RD_BURST_MASK;
2281 1.1 jmcneill reg |= RXQ_CFG_RSS_MODE_DIS;
2282 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
2283 1.2 jmcneill reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
2284 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2285 1.1 jmcneill
2286 1.1 jmcneill /* Configure DMA parameters. */
2287 1.1 jmcneill reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2288 1.1 jmcneill reg |= sc->alc_rcb;
2289 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2290 1.1 jmcneill reg |= DMA_CFG_CMB_ENB;
2291 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2292 1.1 jmcneill reg |= DMA_CFG_SMB_ENB;
2293 1.1 jmcneill else
2294 1.1 jmcneill reg |= DMA_CFG_SMB_DIS;
2295 1.1 jmcneill reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2296 1.1 jmcneill DMA_CFG_RD_BURST_SHIFT;
2297 1.1 jmcneill reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2298 1.1 jmcneill DMA_CFG_WR_BURST_SHIFT;
2299 1.1 jmcneill reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2300 1.1 jmcneill DMA_CFG_RD_DELAY_CNT_MASK;
2301 1.1 jmcneill reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2302 1.1 jmcneill DMA_CFG_WR_DELAY_CNT_MASK;
2303 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2304 1.1 jmcneill
2305 1.1 jmcneill /*
2306 1.1 jmcneill * Configure Tx/Rx MACs.
2307 1.1 jmcneill * - Auto-padding for short frames.
2308 1.1 jmcneill * - Enable CRC generation.
2309 1.1 jmcneill * The actual reconfiguration of the MAC for the resolved
2310 1.1 jmcneill * speed/duplex follows once link establishment is detected.
2311 1.2 jmcneill * AR813x/AR815x always does checksum computation regardless
2312 1.1 jmcneill * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known to
2313 1.1 jmcneill * have a bug in the protocol field of the Rx return structure, so
2314 1.1 jmcneill * these controllers can't handle fragmented frames. Disable
2315 1.1 jmcneill * Rx checksum offloading until there is a newer controller
2316 1.1 jmcneill * with a sane implementation.
2317 1.1 jmcneill */
2318 1.1 jmcneill reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
2319 1.1 jmcneill ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2320 1.1 jmcneill MAC_CFG_PREAMBLE_MASK);
2321 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
2322 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
2323 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2324 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2325 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
2326 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
2327 1.1 jmcneill else
2328 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
2329 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2330 1.1 jmcneill
2331 1.1 jmcneill /* Set up the receive filter. */
2332 1.1 jmcneill alc_iff(sc);
2333 1.1 jmcneill alc_rxvlan(sc);
2334 1.1 jmcneill
2335 1.1 jmcneill /* Acknowledge all pending interrupts and clear them. */
2336 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
2337 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2338 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
2339 1.1 jmcneill
2340 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
2341 1.1 jmcneill /* Switch to the current media. */
2342 1.1 jmcneill mii = &sc->sc_miibus;
2343 1.1 jmcneill mii_mediachg(mii);
2344 1.1 jmcneill
2345 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2346 1.1 jmcneill
2347 1.1 jmcneill ifp->if_flags |= IFF_RUNNING;
2348 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
2349 1.1 jmcneill
2350 1.1 jmcneill return (0);
2351 1.1 jmcneill }
2352 1.1 jmcneill
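/*
 * Stop the interface: halt the MAC, queues and DMA engines and release
 * any mbufs still sitting in the Rx/Tx rings.
 */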
2353 1.1 jmcneill static void
2354 1.1 jmcneill alc_stop(struct ifnet *ifp, int disable)
2355 1.1 jmcneill {
2356 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
2357 1.1 jmcneill struct alc_txdesc *txd;
2358 1.1 jmcneill struct alc_rxdesc *rxd;
2359 1.1 jmcneill uint32_t reg;
2360 1.1 jmcneill int i;
2361 1.1 jmcneill
2362 1.1 jmcneill callout_stop(&sc->sc_tick_ch);
2363 1.1 jmcneill
2364 1.1 jmcneill /*
2365 1.1 jmcneill * Mark the interface down and cancel the watchdog timer.
2366 1.1 jmcneill */
2367 1.1 jmcneill ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2368 1.1 jmcneill ifp->if_timer = 0;
2369 1.1 jmcneill
2370 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
2371 1.1 jmcneill
2372 1.1 jmcneill alc_stats_update(sc);
2373 1.1 jmcneill
2374 1.1 jmcneill mii_down(&sc->sc_miibus);
2375 1.1 jmcneill
2376 1.1 jmcneill /* Disable interrupts. */
2377 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
2378 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2379 1.1 jmcneill alc_stop_queue(sc);
2380 1.1 jmcneill
2381 1.1 jmcneill /* Disable DMA. */
2382 1.1 jmcneill reg = CSR_READ_4(sc, ALC_DMA_CFG);
2383 1.1 jmcneill reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
2384 1.1 jmcneill reg |= DMA_CFG_SMB_DIS;
2385 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2386 1.1 jmcneill DELAY(1000);
2387 1.1 jmcneill
2388 1.1 jmcneill /* Stop Rx/Tx MACs. */
2389 1.1 jmcneill alc_stop_mac(sc);
2390 1.1 jmcneill
2391 1.1 jmcneill /* Disable interrupts which might be touched in taskq handler. */
2392 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2393 1.1 jmcneill
2394 1.1 jmcneill /* Reclaim Rx buffers that have been processed. */
2395 1.1 jmcneill if (sc->alc_cdata.alc_rxhead != NULL)
2396 1.1 jmcneill m_freem(sc->alc_cdata.alc_rxhead);
2397 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2398 1.1 jmcneill /*
2399 1.1 jmcneill * Free Tx/Rx mbufs still in the queues.
2400 1.1 jmcneill */
2401 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
2402 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
2403 1.1 jmcneill if (rxd->rx_m != NULL) {
2404 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2405 1.1 jmcneill m_freem(rxd->rx_m);
2406 1.1 jmcneill rxd->rx_m = NULL;
2407 1.1 jmcneill }
2408 1.1 jmcneill }
2409 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2410 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2411 1.1 jmcneill if (txd->tx_m != NULL) {
2412 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2413 1.1 jmcneill m_freem(txd->tx_m);
2414 1.1 jmcneill txd->tx_m = NULL;
2415 1.1 jmcneill }
2416 1.1 jmcneill }
2417 1.1 jmcneill }
2418 1.1 jmcneill
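/* Disable the Rx/Tx MAC and wait until the controller reports idle. */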
2419 1.1 jmcneill static void
2420 1.1 jmcneill alc_stop_mac(struct alc_softc *sc)
2421 1.1 jmcneill {
2422 1.1 jmcneill uint32_t reg;
2423 1.1 jmcneill int i;
2424 1.1 jmcneill
2425 1.1 jmcneill /* Disable Rx/Tx MAC. */
2426 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2427 1.1 jmcneill if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2428 1.2 jmcneill reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2429 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2430 1.1 jmcneill }
2431 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2432 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2433 1.1 jmcneill if (reg == 0)
2434 1.1 jmcneill break;
2435 1.1 jmcneill DELAY(10);
2436 1.1 jmcneill }
2437 1.1 jmcneill if (i == 0)
2438 1.1 jmcneill printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
2439 1.1 jmcneill device_xname(sc->sc_dev), reg);
2440 1.1 jmcneill }
2441 1.1 jmcneill
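/* Enable the Rx and Tx queues; only Rx queue 0 is used by this driver. */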
2442 1.1 jmcneill static void
2443 1.1 jmcneill alc_start_queue(struct alc_softc *sc)
2444 1.1 jmcneill {
2445 1.1 jmcneill uint32_t qcfg[] = {
2446 1.1 jmcneill 0,
2447 1.1 jmcneill RXQ_CFG_QUEUE0_ENB,
2448 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
2449 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
2450 1.1 jmcneill RXQ_CFG_ENB
2451 1.1 jmcneill };
2452 1.1 jmcneill uint32_t cfg;
2453 1.1 jmcneill
2454 1.1 jmcneill /* Enable RxQ. */
2455 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
2456 1.1 jmcneill cfg &= ~RXQ_CFG_ENB;
2457 1.1 jmcneill cfg |= qcfg[1];
2458 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
2459 1.1 jmcneill /* Enable TxQ. */
2460 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
2461 1.1 jmcneill cfg |= TXQ_CFG_ENB;
2462 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
2463 1.1 jmcneill }
2464 1.1 jmcneill
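/* Disable the Rx/Tx queues and wait until they report idle. */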
2465 1.1 jmcneill static void
2466 1.1 jmcneill alc_stop_queue(struct alc_softc *sc)
2467 1.1 jmcneill {
2468 1.1 jmcneill uint32_t reg;
2469 1.1 jmcneill int i;
2470 1.1 jmcneill
2471 1.1 jmcneill /* Disable RxQ. */
2472 1.1 jmcneill reg = CSR_READ_4(sc, ALC_RXQ_CFG);
2473 1.1 jmcneill if ((reg & RXQ_CFG_ENB) != 0) {
2474 1.1 jmcneill reg &= ~RXQ_CFG_ENB;
2475 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2476 1.1 jmcneill }
2477 1.1 jmcneill /* Disable TxQ. */
2478 1.1 jmcneill reg = CSR_READ_4(sc, ALC_TXQ_CFG);
2479 1.2 jmcneill if ((reg & TXQ_CFG_ENB) != 0) {
2480 1.1 jmcneill reg &= ~TXQ_CFG_ENB;
2481 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
2482 1.1 jmcneill }
2483 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2484 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2485 1.1 jmcneill if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2486 1.1 jmcneill break;
2487 1.1 jmcneill DELAY(10);
2488 1.1 jmcneill }
2489 1.1 jmcneill if (i == 0)
2490 1.1 jmcneill printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
2491 1.1 jmcneill device_xname(sc->sc_dev), reg);
2492 1.1 jmcneill }
2493 1.1 jmcneill
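/* Reset the Tx ring indexes and clear the Tx descriptors. */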
2494 1.1 jmcneill static void
2495 1.1 jmcneill alc_init_tx_ring(struct alc_softc *sc)
2496 1.1 jmcneill {
2497 1.1 jmcneill struct alc_ring_data *rd;
2498 1.1 jmcneill struct alc_txdesc *txd;
2499 1.1 jmcneill int i;
2500 1.1 jmcneill
2501 1.1 jmcneill sc->alc_cdata.alc_tx_prod = 0;
2502 1.1 jmcneill sc->alc_cdata.alc_tx_cons = 0;
2503 1.1 jmcneill sc->alc_cdata.alc_tx_cnt = 0;
2504 1.1 jmcneill
2505 1.1 jmcneill rd = &sc->alc_rdata;
2506 1.1 jmcneill memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
2507 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2508 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2509 1.1 jmcneill txd->tx_m = NULL;
2510 1.1 jmcneill }
2511 1.1 jmcneill
2512 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2513 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2514 1.1 jmcneill }
2515 1.1 jmcneill
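/* Populate the Rx ring with fresh mbuf clusters. */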
2516 1.1 jmcneill static int
2517 1.1 jmcneill alc_init_rx_ring(struct alc_softc *sc)
2518 1.1 jmcneill {
2519 1.1 jmcneill struct alc_ring_data *rd;
2520 1.1 jmcneill struct alc_rxdesc *rxd;
2521 1.1 jmcneill int i;
2522 1.1 jmcneill
2523 1.1 jmcneill sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
2524 1.1 jmcneill rd = &sc->alc_rdata;
2525 1.1 jmcneill memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
2526 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
2527 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
2528 1.1 jmcneill rxd->rx_m = NULL;
2529 1.1 jmcneill rxd->rx_desc = &rd->alc_rx_ring[i];
2530 1.1 jmcneill if (alc_newbuf(sc, rxd, 1) != 0)
2531 1.1 jmcneill return (ENOBUFS);
2532 1.1 jmcneill }
2533 1.1 jmcneill
2534 1.1 jmcneill /*
2535 1.1 jmcneill * Since the controller does not update Rx descriptors, the driver
2536 1.1 jmcneill * does not have to read them back, so BUS_DMASYNC_PREWRITE
2537 1.1 jmcneill * is enough to ensure coherence.
2538 1.1 jmcneill */
2539 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2540 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2541 1.1 jmcneill /* Let controller know availability of new Rx buffers. */
2542 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
2543 1.1 jmcneill
2544 1.1 jmcneill return (0);
2545 1.1 jmcneill }
2546 1.1 jmcneill
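/*
 * Reset the Rx return ring: rewind the consumer index, drop any
 * pending Rx chain state and clear the ring.
 */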
2547 1.1 jmcneill static void
2548 1.1 jmcneill alc_init_rr_ring(struct alc_softc *sc)
2549 1.1 jmcneill {
2550 1.1 jmcneill struct alc_ring_data *rd;
2551 1.1 jmcneill
2552 1.1 jmcneill sc->alc_cdata.alc_rr_cons = 0;
2553 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2554 1.1 jmcneill
2555 1.1 jmcneill rd = &sc->alc_rdata;
2556 1.1 jmcneill memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
2557 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2558 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2559 1.1 jmcneill }
2560 1.1 jmcneill
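/* Clear the CMB (coalescing message block) shared with the controller. */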
2561 1.1 jmcneill static void
2562 1.1 jmcneill alc_init_cmb(struct alc_softc *sc)
2563 1.1 jmcneill {
2564 1.1 jmcneill struct alc_ring_data *rd;
2565 1.1 jmcneill
2566 1.1 jmcneill rd = &sc->alc_rdata;
2567 1.1 jmcneill memset(rd->alc_cmb, 0, ALC_CMB_SZ);
2568 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2569 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2570 1.1 jmcneill }
2571 1.1 jmcneill
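/* Clear the SMB (statistics message block) shared with the controller. */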
2572 1.1 jmcneill static void
2573 1.1 jmcneill alc_init_smb(struct alc_softc *sc)
2574 1.1 jmcneill {
2575 1.1 jmcneill struct alc_ring_data *rd;
2576 1.1 jmcneill
2577 1.1 jmcneill rd = &sc->alc_rdata;
2578 1.1 jmcneill memset(rd->alc_smb, 0, ALC_SMB_SZ);
2579 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2580 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2581 1.1 jmcneill }
2582 1.1 jmcneill
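/*
 * Enable or disable hardware VLAN tag stripping according to the
 * ETHERCAP_VLAN_HWTAGGING capability setting.
 */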
2583 1.1 jmcneill static void
2584 1.1 jmcneill alc_rxvlan(struct alc_softc *sc)
2585 1.1 jmcneill {
2586 1.1 jmcneill uint32_t reg;
2587 1.1 jmcneill
2588 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2589 1.3 sborrill if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2590 1.1 jmcneill reg |= MAC_CFG_VLAN_TAG_STRIP;
2591 1.1 jmcneill else
2592 1.1 jmcneill reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2593 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2594 1.1 jmcneill }
2595 1.1 jmcneill
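/*
 * Program the Rx filter.  Broadcast frames are always accepted.  In
 * promiscuous mode, or when any multicast groups are joined, fall back
 * to accepting all multicast traffic; otherwise build the 64-bit
 * multicast hash from the big-endian CRC32 of each address, where CRC
 * bit 31 selects MAR0/MAR1 and bits 30:26 select the bit within it.
 */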
2596 1.1 jmcneill static void
2597 1.1 jmcneill alc_iff(struct alc_softc *sc)
2598 1.1 jmcneill {
2599 1.1 jmcneill struct ethercom *ec = &sc->sc_ec;
2600 1.1 jmcneill struct ifnet *ifp = &ec->ec_if;
2601 1.1 jmcneill struct ether_multi *enm;
2602 1.1 jmcneill struct ether_multistep step;
2603 1.1 jmcneill uint32_t crc;
2604 1.1 jmcneill uint32_t mchash[2];
2605 1.1 jmcneill uint32_t rxcfg;
2606 1.1 jmcneill
2607 1.1 jmcneill rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2608 1.1 jmcneill rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2609 1.1 jmcneill ifp->if_flags &= ~IFF_ALLMULTI;
2610 1.1 jmcneill
2611 1.1 jmcneill /*
2612 1.1 jmcneill * Always accept broadcast frames.
2613 1.1 jmcneill */
2614 1.1 jmcneill rxcfg |= MAC_CFG_BCAST;
2615 1.1 jmcneill
2616 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2617 1.1 jmcneill ifp->if_flags |= IFF_ALLMULTI;
2618 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC)
2619 1.1 jmcneill rxcfg |= MAC_CFG_PROMISC;
2620 1.1 jmcneill else
2621 1.1 jmcneill rxcfg |= MAC_CFG_ALLMULTI;
2622 1.1 jmcneill mchash[0] = mchash[1] = 0xFFFFFFFF;
2623 1.1 jmcneill } else {
2624 1.1 jmcneill /* Program new filter. */
2625 1.1 jmcneill memset(mchash, 0, sizeof(mchash));
2626 1.1 jmcneill
2627 1.1 jmcneill ETHER_FIRST_MULTI(step, ec, enm);
2628 1.1 jmcneill while (enm != NULL) {
2629 1.1 jmcneill crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2630 1.1 jmcneill mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2631 1.1 jmcneill ETHER_NEXT_MULTI(step, enm);
2632 1.1 jmcneill }
2633 1.1 jmcneill }
2634 1.1 jmcneill
2635 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2636 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2637 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2638 1.1 jmcneill }
2639 1.1 jmcneill
2640 1.5 jmcneill MODULE(MODULE_CLASS_DRIVER, if_alc, "pci");
2641 1.1 jmcneill
2642 1.1 jmcneill #ifdef _MODULE
2643 1.1 jmcneill #include "ioconf.c"
2644 1.1 jmcneill #endif
2645 1.1 jmcneill
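/*
 * Loadable module glue: attach or detach the driver's autoconfiguration
 * data when the module is loaded or unloaded.
 */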
2646 1.1 jmcneill static int
2647 1.1 jmcneill if_alc_modcmd(modcmd_t cmd, void *opaque)
2648 1.1 jmcneill {
2649 1.1 jmcneill int error = 0;
2650 1.1 jmcneill
2651 1.1 jmcneill switch (cmd) {
2652 1.1 jmcneill case MODULE_CMD_INIT:
2653 1.1 jmcneill #ifdef _MODULE
2654 1.1 jmcneill error = config_init_component(cfdriver_ioconf_if_alc,
2655 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2656 1.1 jmcneill #endif
2657 1.1 jmcneill return error;
2658 1.1 jmcneill case MODULE_CMD_FINI:
2659 1.1 jmcneill #ifdef _MODULE
2660 1.1 jmcneill error = config_fini_component(cfdriver_ioconf_if_alc,
2661 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2662 1.1 jmcneill #endif
2663 1.1 jmcneill return error;
2664 1.1 jmcneill default:
2665 1.1 jmcneill return ENOTTY;
2666 1.1 jmcneill }
2667 1.1 jmcneill }
2668