if_alc.c revision 1.10

 1 1.1 jmcneill /* $OpenBSD: if_alc.c,v 1.1 2009/08/08 09:31:13 kevlo Exp $ */
2 1.1 jmcneill /*-
3 1.1 jmcneill * Copyright (c) 2009, Pyun YongHyeon <yongari (at) FreeBSD.org>
4 1.1 jmcneill * All rights reserved.
5 1.1 jmcneill *
6 1.1 jmcneill * Redistribution and use in source and binary forms, with or without
7 1.1 jmcneill * modification, are permitted provided that the following conditions
8 1.1 jmcneill * are met:
9 1.1 jmcneill * 1. Redistributions of source code must retain the above copyright
10 1.1 jmcneill * notice unmodified, this list of conditions, and the following
11 1.1 jmcneill * disclaimer.
12 1.1 jmcneill * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 jmcneill * notice, this list of conditions and the following disclaimer in the
14 1.1 jmcneill * documentation and/or other materials provided with the distribution.
15 1.1 jmcneill *
16 1.1 jmcneill * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 1.1 jmcneill * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 1.1 jmcneill * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 1.1 jmcneill * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 1.1 jmcneill * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 1.1 jmcneill * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 1.1 jmcneill * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 1.1 jmcneill * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 1.1 jmcneill * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 1.1 jmcneill * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 1.1 jmcneill * SUCH DAMAGE.
27 1.1 jmcneill */
28 1.1 jmcneill
29 1.2 jmcneill /* Driver for Atheros AR813x/AR815x PCIe Ethernet. */
30 1.1 jmcneill
31 1.1 jmcneill #ifdef _KERNEL_OPT
32 1.1 jmcneill #include "vlan.h"
33 1.1 jmcneill #endif
34 1.1 jmcneill
35 1.1 jmcneill #include <sys/param.h>
36 1.1 jmcneill #include <sys/proc.h>
37 1.1 jmcneill #include <sys/endian.h>
38 1.1 jmcneill #include <sys/systm.h>
39 1.1 jmcneill #include <sys/types.h>
40 1.1 jmcneill #include <sys/sockio.h>
41 1.1 jmcneill #include <sys/mbuf.h>
42 1.1 jmcneill #include <sys/queue.h>
43 1.1 jmcneill #include <sys/kernel.h>
44 1.1 jmcneill #include <sys/device.h>
45 1.1 jmcneill #include <sys/callout.h>
46 1.1 jmcneill #include <sys/socket.h>
47 1.1 jmcneill #include <sys/module.h>
48 1.1 jmcneill
49 1.1 jmcneill #include <sys/bus.h>
50 1.1 jmcneill
51 1.1 jmcneill #include <net/if.h>
52 1.1 jmcneill #include <net/if_dl.h>
53 1.1 jmcneill #include <net/if_llc.h>
54 1.1 jmcneill #include <net/if_media.h>
55 1.1 jmcneill #include <net/if_ether.h>
56 1.1 jmcneill
57 1.1 jmcneill #include <net/bpf.h>
58 1.1 jmcneill
59 1.1 jmcneill #ifdef INET
60 1.1 jmcneill #include <netinet/in.h>
61 1.1 jmcneill #include <netinet/in_systm.h>
62 1.1 jmcneill #include <netinet/in_var.h>
63 1.1 jmcneill #include <netinet/ip.h>
64 1.1 jmcneill #endif
65 1.1 jmcneill
66 1.1 jmcneill #include <net/if_types.h>
67 1.1 jmcneill #include <net/if_vlanvar.h>
68 1.1 jmcneill
69 1.1 jmcneill #include <net/bpf.h>
70 1.1 jmcneill
71 1.1 jmcneill #include <sys/rnd.h>
72 1.1 jmcneill
73 1.1 jmcneill #include <dev/mii/mii.h>
74 1.1 jmcneill #include <dev/mii/miivar.h>
75 1.1 jmcneill
76 1.1 jmcneill #include <dev/pci/pcireg.h>
77 1.1 jmcneill #include <dev/pci/pcivar.h>
78 1.1 jmcneill #include <dev/pci/pcidevs.h>
79 1.1 jmcneill
80 1.1 jmcneill #include <dev/pci/if_alcreg.h>
81 1.1 jmcneill
82 1.2 jmcneill /*
83 1.2 jmcneill * Devices supported by this driver.
84 1.2 jmcneill */
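/* Table entries: vendor id, device id, maximum frame length, chip description. */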
85 1.2 jmcneill static struct alc_ident alc_ident_table[] = {
86 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8131, 9 * 1024,
87 1.2 jmcneill "Atheros AR8131 PCIe Gigabit Ethernet" },
88 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8132, 9 * 1024,
89 1.2 jmcneill "Atheros AR8132 PCIe Fast Ethernet" },
90 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151, 6 * 1024,
91 1.2 jmcneill "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
92 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8151_V2, 6 * 1024,
93 1.2 jmcneill "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
94 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B, 6 * 1024,
95 1.2 jmcneill "Atheros AR8152 v1.1 PCIe Fast Ethernet" },
96 1.2 jmcneill { PCI_VENDOR_ATTANSIC, PCI_PRODUCT_ATTANSIC_AR8152_B2, 6 * 1024,
97 1.2 jmcneill "Atheros AR8152 v2.0 PCIe Fast Ethernet" },
98 1.2 jmcneill { 0, 0, 0, NULL },
99 1.2 jmcneill };
100 1.2 jmcneill
101 1.1 jmcneill static int alc_match(device_t, cfdata_t, void *);
102 1.1 jmcneill static void alc_attach(device_t, device_t, void *);
103 1.1 jmcneill static int alc_detach(device_t, int);
104 1.1 jmcneill
105 1.1 jmcneill static int alc_init(struct ifnet *);
106 1.7 mrg static int alc_init_backend(struct ifnet *, bool);
107 1.1 jmcneill static void alc_start(struct ifnet *);
108 1.1 jmcneill static int alc_ioctl(struct ifnet *, u_long, void *);
109 1.1 jmcneill static void alc_watchdog(struct ifnet *);
110 1.1 jmcneill static int alc_mediachange(struct ifnet *);
111 1.1 jmcneill static void alc_mediastatus(struct ifnet *, struct ifmediareq *);
112 1.1 jmcneill
113 1.2 jmcneill static void alc_aspm(struct alc_softc *, int);
114 1.1 jmcneill static void alc_disable_l0s_l1(struct alc_softc *);
115 1.1 jmcneill static int alc_dma_alloc(struct alc_softc *);
116 1.1 jmcneill static void alc_dma_free(struct alc_softc *);
117 1.1 jmcneill static int alc_encap(struct alc_softc *, struct mbuf **);
118 1.2 jmcneill static struct alc_ident *
119 1.2 jmcneill alc_find_ident(struct pci_attach_args *);
120 1.1 jmcneill static void alc_get_macaddr(struct alc_softc *);
121 1.1 jmcneill static void alc_init_cmb(struct alc_softc *);
122 1.1 jmcneill static void alc_init_rr_ring(struct alc_softc *);
123 1.7 mrg static int alc_init_rx_ring(struct alc_softc *, bool);
124 1.1 jmcneill static void alc_init_smb(struct alc_softc *);
125 1.1 jmcneill static void alc_init_tx_ring(struct alc_softc *);
126 1.1 jmcneill static int alc_intr(void *);
127 1.1 jmcneill static void alc_mac_config(struct alc_softc *);
128 1.1 jmcneill static int alc_miibus_readreg(device_t, int, int);
129 1.6 matt static void alc_miibus_statchg(struct ifnet *);
130 1.1 jmcneill static void alc_miibus_writereg(device_t, int, int, int);
131 1.7 mrg static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *, bool);
132 1.1 jmcneill static void alc_phy_down(struct alc_softc *);
133 1.1 jmcneill static void alc_phy_reset(struct alc_softc *);
134 1.1 jmcneill static void alc_reset(struct alc_softc *);
135 1.1 jmcneill static void alc_rxeof(struct alc_softc *, struct rx_rdesc *);
136 1.1 jmcneill static int alc_rxintr(struct alc_softc *);
137 1.1 jmcneill static void alc_iff(struct alc_softc *);
138 1.1 jmcneill static void alc_rxvlan(struct alc_softc *);
139 1.1 jmcneill static void alc_start_queue(struct alc_softc *);
140 1.1 jmcneill static void alc_stats_clear(struct alc_softc *);
141 1.1 jmcneill static void alc_stats_update(struct alc_softc *);
142 1.1 jmcneill static void alc_stop(struct ifnet *, int);
143 1.1 jmcneill static void alc_stop_mac(struct alc_softc *);
144 1.1 jmcneill static void alc_stop_queue(struct alc_softc *);
145 1.1 jmcneill static void alc_tick(void *);
146 1.1 jmcneill static void alc_txeof(struct alc_softc *);
147 1.1 jmcneill
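/*
 * DMA burst lengths in bytes, indexed by the encoded maximum read
 * request / payload size values kept in alc_dma_rd_burst and
 * alc_dma_wr_burst below.
 */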
148 1.1 jmcneill uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };
149 1.1 jmcneill
150 1.1 jmcneill CFATTACH_DECL_NEW(alc, sizeof(struct alc_softc),
151 1.1 jmcneill alc_match, alc_attach, alc_detach, NULL);
152 1.1 jmcneill
153 1.1 jmcneill int alcdebug = 0;
154 1.1 jmcneill #define DPRINTF(x) do { if (alcdebug) printf x; } while (0)
155 1.1 jmcneill
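/* The usual two-byte pad that keeps the IP header 32-bit aligned. */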
156 1.1 jmcneill #define ETHER_ALIGN 2
157 1.1 jmcneill #define ALC_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)
158 1.1 jmcneill
159 1.1 jmcneill static int
160 1.1 jmcneill alc_miibus_readreg(device_t dev, int phy, int reg)
161 1.1 jmcneill {
162 1.1 jmcneill struct alc_softc *sc = device_private(dev);
163 1.1 jmcneill uint32_t v;
164 1.1 jmcneill int i;
165 1.1 jmcneill
166 1.1 jmcneill if (phy != sc->alc_phyaddr)
167 1.1 jmcneill return (0);
168 1.1 jmcneill
169 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
170 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
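	/* Poll for completion; each iteration waits 5us, up to ALC_PHY_TIMEOUT tries. */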
171 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
172 1.1 jmcneill DELAY(5);
173 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
174 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
175 1.1 jmcneill break;
176 1.1 jmcneill }
177 1.1 jmcneill
178 1.1 jmcneill if (i == 0) {
179 1.1 jmcneill printf("%s: phy read timeout: phy %d, reg %d\n",
180 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
181 1.1 jmcneill return (0);
182 1.1 jmcneill }
183 1.1 jmcneill
184 1.1 jmcneill return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
185 1.1 jmcneill }
186 1.1 jmcneill
187 1.1 jmcneill static void
188 1.1 jmcneill alc_miibus_writereg(device_t dev, int phy, int reg, int val)
189 1.1 jmcneill {
190 1.1 jmcneill struct alc_softc *sc = device_private(dev);
191 1.1 jmcneill uint32_t v;
192 1.1 jmcneill int i;
193 1.1 jmcneill
194 1.1 jmcneill if (phy != sc->alc_phyaddr)
195 1.1 jmcneill return;
196 1.1 jmcneill
197 1.1 jmcneill CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
198 1.1 jmcneill (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
199 1.1 jmcneill MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
200 1.1 jmcneill for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
201 1.1 jmcneill DELAY(5);
202 1.1 jmcneill v = CSR_READ_4(sc, ALC_MDIO);
203 1.1 jmcneill if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
204 1.1 jmcneill break;
205 1.1 jmcneill }
206 1.1 jmcneill
207 1.1 jmcneill if (i == 0)
208 1.1 jmcneill printf("%s: phy write timeout: phy %d, reg %d\n",
209 1.1 jmcneill device_xname(sc->sc_dev), phy, reg);
210 1.1 jmcneill }
211 1.1 jmcneill
212 1.1 jmcneill static void
213 1.6 matt alc_miibus_statchg(struct ifnet *ifp)
214 1.1 jmcneill {
215 1.6 matt struct alc_softc *sc = ifp->if_softc;
216 1.6 matt struct mii_data *mii = &sc->sc_miibus;
217 1.1 jmcneill uint32_t reg;
218 1.1 jmcneill
219 1.1 jmcneill if ((ifp->if_flags & IFF_RUNNING) == 0)
220 1.1 jmcneill return;
221 1.1 jmcneill
222 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
223 1.1 jmcneill if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
224 1.1 jmcneill (IFM_ACTIVE | IFM_AVALID)) {
225 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
226 1.1 jmcneill case IFM_10_T:
227 1.1 jmcneill case IFM_100_TX:
228 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
229 1.1 jmcneill break;
230 1.1 jmcneill case IFM_1000_T:
231 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
232 1.1 jmcneill sc->alc_flags |= ALC_FLAG_LINK;
233 1.1 jmcneill break;
234 1.1 jmcneill default:
235 1.1 jmcneill break;
236 1.1 jmcneill }
237 1.1 jmcneill }
238 1.1 jmcneill alc_stop_queue(sc);
239 1.1 jmcneill /* Stop Rx/Tx MACs. */
240 1.1 jmcneill alc_stop_mac(sc);
241 1.1 jmcneill
242 1.1 jmcneill /* Program MACs with resolved speed/duplex/flow-control. */
243 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
244 1.1 jmcneill alc_start_queue(sc);
245 1.1 jmcneill alc_mac_config(sc);
246 1.1 jmcneill /* Re-enable Tx/Rx MACs. */
247 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
248 1.1 jmcneill reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
249 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
250 1.2 jmcneill alc_aspm(sc, IFM_SUBTYPE(mii->mii_media_active));
251 1.1 jmcneill }
252 1.1 jmcneill }
253 1.1 jmcneill
254 1.1 jmcneill static void
255 1.1 jmcneill alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
256 1.1 jmcneill {
257 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
258 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
259 1.1 jmcneill
260 1.1 jmcneill mii_pollstat(mii);
261 1.1 jmcneill ifmr->ifm_status = mii->mii_media_status;
262 1.1 jmcneill ifmr->ifm_active = mii->mii_media_active;
263 1.1 jmcneill }
264 1.1 jmcneill
265 1.1 jmcneill static int
266 1.1 jmcneill alc_mediachange(struct ifnet *ifp)
267 1.1 jmcneill {
268 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
269 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
270 1.1 jmcneill int error;
271 1.1 jmcneill
272 1.1 jmcneill if (mii->mii_instance != 0) {
273 1.1 jmcneill struct mii_softc *miisc;
274 1.1 jmcneill
275 1.1 jmcneill LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
276 1.1 jmcneill mii_phy_reset(miisc);
277 1.1 jmcneill }
278 1.1 jmcneill error = mii_mediachg(mii);
279 1.1 jmcneill
280 1.1 jmcneill return (error);
281 1.1 jmcneill }
282 1.1 jmcneill
283 1.2 jmcneill static struct alc_ident *
284 1.2 jmcneill alc_find_ident(struct pci_attach_args *pa)
285 1.2 jmcneill {
286 1.2 jmcneill struct alc_ident *ident;
287 1.2 jmcneill uint16_t vendor, devid;
288 1.2 jmcneill
289 1.2 jmcneill vendor = PCI_VENDOR(pa->pa_id);
290 1.2 jmcneill devid = PCI_PRODUCT(pa->pa_id);
291 1.2 jmcneill for (ident = alc_ident_table; ident->name != NULL; ident++) {
292 1.2 jmcneill if (vendor == ident->vendorid && devid == ident->deviceid)
293 1.2 jmcneill return (ident);
294 1.2 jmcneill }
295 1.2 jmcneill
296 1.2 jmcneill return (NULL);
297 1.2 jmcneill }
298 1.2 jmcneill
299 1.1 jmcneill static int
300 1.1 jmcneill alc_match(device_t dev, cfdata_t match, void *aux)
301 1.1 jmcneill {
302 1.1 jmcneill struct pci_attach_args *pa = aux;
303 1.1 jmcneill
304 1.2 jmcneill return alc_find_ident(pa) != NULL;
305 1.1 jmcneill }
306 1.1 jmcneill
307 1.1 jmcneill static void
308 1.1 jmcneill alc_get_macaddr(struct alc_softc *sc)
309 1.1 jmcneill {
310 1.1 jmcneill uint32_t ea[2], opt;
311 1.2 jmcneill uint16_t val;
312 1.2 jmcneill int eeprom, i;
313 1.1 jmcneill
314 1.2 jmcneill eeprom = 0;
315 1.1 jmcneill opt = CSR_READ_4(sc, ALC_OPT_CFG);
316 1.2 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
317 1.2 jmcneill (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
318 1.1 jmcneill /*
319 1.1 jmcneill * EEPROM found, let TWSI reload EEPROM configuration.
 320 1.1 jmcneill * This will set the Ethernet address of the controller.
321 1.1 jmcneill */
322 1.2 jmcneill eeprom++;
323 1.2 jmcneill switch (sc->alc_ident->deviceid) {
324 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8131:
325 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
326 1.2 jmcneill if ((opt & OPT_CFG_CLK_ENB) == 0) {
327 1.2 jmcneill opt |= OPT_CFG_CLK_ENB;
328 1.2 jmcneill CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
329 1.2 jmcneill CSR_READ_4(sc, ALC_OPT_CFG);
330 1.2 jmcneill DELAY(1000);
331 1.2 jmcneill }
332 1.2 jmcneill break;
333 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
334 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
335 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
336 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
337 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
338 1.2 jmcneill ALC_MII_DBG_ADDR, 0x00);
339 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
340 1.2 jmcneill ALC_MII_DBG_DATA);
341 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
342 1.2 jmcneill ALC_MII_DBG_DATA, val & 0xFF7F);
343 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
344 1.2 jmcneill ALC_MII_DBG_ADDR, 0x3B);
345 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
346 1.2 jmcneill ALC_MII_DBG_DATA);
347 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
348 1.2 jmcneill ALC_MII_DBG_DATA, val | 0x0008);
349 1.2 jmcneill DELAY(20);
350 1.2 jmcneill break;
351 1.1 jmcneill }
352 1.2 jmcneill
353 1.2 jmcneill CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
354 1.2 jmcneill CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
355 1.2 jmcneill CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
356 1.2 jmcneill CSR_READ_4(sc, ALC_WOL_CFG);
357 1.2 jmcneill
358 1.1 jmcneill CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
359 1.1 jmcneill TWSI_CFG_SW_LD_START);
360 1.1 jmcneill for (i = 100; i > 0; i--) {
361 1.1 jmcneill DELAY(1000);
362 1.1 jmcneill if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
363 1.1 jmcneill TWSI_CFG_SW_LD_START) == 0)
364 1.1 jmcneill break;
365 1.1 jmcneill }
366 1.1 jmcneill if (i == 0)
367 1.8 christos printf("%s: reloading EEPROM timeout!\n",
368 1.1 jmcneill device_xname(sc->sc_dev));
369 1.1 jmcneill } else {
370 1.1 jmcneill if (alcdebug)
371 1.1 jmcneill printf("%s: EEPROM not found!\n", device_xname(sc->sc_dev));
372 1.1 jmcneill }
373 1.2 jmcneill if (eeprom != 0) {
374 1.2 jmcneill switch (sc->alc_ident->deviceid) {
375 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8131:
376 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
377 1.2 jmcneill if ((opt & OPT_CFG_CLK_ENB) != 0) {
378 1.2 jmcneill opt &= ~OPT_CFG_CLK_ENB;
379 1.2 jmcneill CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
380 1.2 jmcneill CSR_READ_4(sc, ALC_OPT_CFG);
381 1.2 jmcneill DELAY(1000);
382 1.2 jmcneill }
383 1.2 jmcneill break;
384 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
385 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
386 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
387 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
388 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
389 1.2 jmcneill ALC_MII_DBG_ADDR, 0x00);
390 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
391 1.2 jmcneill ALC_MII_DBG_DATA);
392 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
393 1.2 jmcneill ALC_MII_DBG_DATA, val | 0x0080);
394 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
395 1.2 jmcneill ALC_MII_DBG_ADDR, 0x3B);
396 1.2 jmcneill val = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
397 1.2 jmcneill ALC_MII_DBG_DATA);
398 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
399 1.2 jmcneill ALC_MII_DBG_DATA, val & 0xFFF7);
400 1.2 jmcneill DELAY(20);
401 1.2 jmcneill break;
402 1.2 jmcneill }
403 1.1 jmcneill }
404 1.1 jmcneill
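	/*
	 * The station address is read back from PAR0/PAR1: PAR1 holds the
	 * first two bytes, PAR0 the remaining four.
	 */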
405 1.1 jmcneill ea[0] = CSR_READ_4(sc, ALC_PAR0);
406 1.1 jmcneill ea[1] = CSR_READ_4(sc, ALC_PAR1);
407 1.1 jmcneill sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
408 1.1 jmcneill sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
409 1.1 jmcneill sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
410 1.1 jmcneill sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
411 1.1 jmcneill sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
412 1.1 jmcneill sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
413 1.1 jmcneill }
414 1.1 jmcneill
415 1.1 jmcneill static void
416 1.1 jmcneill alc_disable_l0s_l1(struct alc_softc *sc)
417 1.1 jmcneill {
418 1.1 jmcneill uint32_t pmcfg;
419 1.1 jmcneill
420 1.1 jmcneill /* Another magic from vendor. */
421 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
422 1.1 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
423 1.1 jmcneill PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK |
424 1.1 jmcneill PM_CFG_SERDES_PD_EX_L1);
425 1.1 jmcneill pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
426 1.1 jmcneill PM_CFG_SERDES_L1_ENB;
427 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
428 1.1 jmcneill }
429 1.1 jmcneill
430 1.1 jmcneill static void
431 1.1 jmcneill alc_phy_reset(struct alc_softc *sc)
432 1.1 jmcneill {
433 1.1 jmcneill uint16_t data;
434 1.1 jmcneill
435 1.1 jmcneill /* Reset magic from Linux. */
436 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
437 1.1 jmcneill GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET);
438 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
439 1.1 jmcneill DELAY(10 * 1000);
440 1.1 jmcneill
441 1.1 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
442 1.1 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
443 1.1 jmcneill GPHY_CFG_SEL_ANA_RESET);
444 1.1 jmcneill CSR_READ_2(sc, ALC_GPHY_CFG);
445 1.1 jmcneill DELAY(10 * 1000);
446 1.1 jmcneill
447 1.2 jmcneill /* DSP fixup, Vendor magic. */
448 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
449 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
450 1.2 jmcneill ALC_MII_DBG_ADDR, 0x000A);
451 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
452 1.2 jmcneill ALC_MII_DBG_DATA);
453 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
454 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xDFFF);
455 1.2 jmcneill }
456 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
457 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
458 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
459 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
460 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
461 1.2 jmcneill ALC_MII_DBG_ADDR, 0x003B);
462 1.2 jmcneill data = alc_miibus_readreg(sc->sc_dev, sc->alc_phyaddr,
463 1.2 jmcneill ALC_MII_DBG_DATA);
464 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
465 1.2 jmcneill ALC_MII_DBG_DATA, data & 0xFFF7);
466 1.2 jmcneill DELAY(20 * 1000);
467 1.2 jmcneill }
468 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151) {
469 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
470 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
471 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
472 1.2 jmcneill ALC_MII_DBG_DATA, 0x929D);
473 1.2 jmcneill }
474 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
475 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132 ||
476 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
477 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2) {
478 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
479 1.2 jmcneill ALC_MII_DBG_ADDR, 0x0029);
480 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
481 1.2 jmcneill ALC_MII_DBG_DATA, 0xB6DD);
482 1.2 jmcneill }
483 1.2 jmcneill
484 1.1 jmcneill /* Load DSP codes, vendor magic. */
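	/*
	 * The analog/DSP registers are reached indirectly: write the
	 * register number to ALC_MII_DBG_ADDR, then the value to
	 * ALC_MII_DBG_DATA.
	 */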
485 1.1 jmcneill data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
486 1.1 jmcneill ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
487 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
488 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG18);
489 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
490 1.1 jmcneill ALC_MII_DBG_DATA, data);
491 1.1 jmcneill
492 1.1 jmcneill data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
493 1.1 jmcneill ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
494 1.1 jmcneill ANA_SERDES_EN_LCKDT;
495 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
496 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG5);
497 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
498 1.1 jmcneill ALC_MII_DBG_DATA, data);
499 1.1 jmcneill
500 1.1 jmcneill data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
501 1.1 jmcneill ANA_LONG_CABLE_TH_100_MASK) |
502 1.1 jmcneill ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
 503 1.1 jmcneill ANA_SHORT_CABLE_TH_100_MASK) |
504 1.1 jmcneill ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
505 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
506 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG54);
507 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
508 1.1 jmcneill ALC_MII_DBG_DATA, data);
509 1.1 jmcneill
510 1.1 jmcneill data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
511 1.1 jmcneill ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
512 1.1 jmcneill ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
513 1.1 jmcneill ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
514 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
515 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG4);
516 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
517 1.1 jmcneill ALC_MII_DBG_DATA, data);
518 1.1 jmcneill
 519 1.1 jmcneill data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
520 1.1 jmcneill ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
521 1.1 jmcneill ANA_OEN_125M;
522 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
523 1.1 jmcneill ALC_MII_DBG_ADDR, MII_ANA_CFG0);
524 1.1 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
525 1.1 jmcneill ALC_MII_DBG_DATA, data);
526 1.1 jmcneill DELAY(1000);
527 1.1 jmcneill }
528 1.1 jmcneill
529 1.1 jmcneill static void
530 1.1 jmcneill alc_phy_down(struct alc_softc *sc)
531 1.1 jmcneill {
532 1.2 jmcneill switch (sc->alc_ident->deviceid) {
533 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
534 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
535 1.2 jmcneill /*
 536 1.2 jmcneill * GPHY power down caused more problems on AR8151 v2.0:
 537 1.2 jmcneill * when the driver was reloaded after a GPHY power down,
 538 1.2 jmcneill * accesses to PHY/MAC registers hung the system, and only
 539 1.2 jmcneill * a cold boot recovered from it. It is unclear whether
 540 1.2 jmcneill * AR8151 v1.0 also requires this workaround; no AR8151
 541 1.2 jmcneill * v1.0 controller was available for testing.
 542 1.2 jmcneill * The only option left is to isolate the PHY and
 543 1.2 jmcneill * initiate a PHY power down, which in turn saves more
 544 1.2 jmcneill * power when the driver is unloaded.
545 1.2 jmcneill */
546 1.2 jmcneill alc_miibus_writereg(sc->sc_dev, sc->alc_phyaddr,
547 1.2 jmcneill MII_BMCR, BMCR_ISO | BMCR_PDOWN);
548 1.2 jmcneill break;
549 1.2 jmcneill default:
550 1.2 jmcneill /* Force PHY down. */
551 1.2 jmcneill CSR_WRITE_2(sc, ALC_GPHY_CFG,
552 1.2 jmcneill GPHY_CFG_EXT_RESET | GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
553 1.2 jmcneill GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
554 1.2 jmcneill GPHY_CFG_PWDOWN_HW);
555 1.2 jmcneill DELAY(1000);
556 1.2 jmcneill break;
557 1.2 jmcneill }
558 1.1 jmcneill }
559 1.1 jmcneill
560 1.1 jmcneill static void
561 1.2 jmcneill alc_aspm(struct alc_softc *sc, int media)
562 1.1 jmcneill {
563 1.1 jmcneill uint32_t pmcfg;
564 1.2 jmcneill uint16_t linkcfg;
565 1.8 christos
566 1.1 jmcneill pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
567 1.2 jmcneill if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
568 1.2 jmcneill (ALC_FLAG_APS | ALC_FLAG_PCIE))
569 1.2 jmcneill linkcfg = CSR_READ_2(sc, sc->alc_expcap +
570 1.9 msaitoh PCIE_LCSR);
571 1.2 jmcneill else
572 1.2 jmcneill linkcfg = 0;
573 1.1 jmcneill pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
574 1.2 jmcneill pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
575 1.1 jmcneill pmcfg |= PM_CFG_MAC_ASPM_CHK;
576 1.2 jmcneill pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
577 1.2 jmcneill pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
578 1.2 jmcneill
579 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
580 1.2 jmcneill /* Disable extended sync except AR8152 B v1.0 */
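		/* (Bit 0x80 is the Extended Synch bit of the PCIe Link Control register.) */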
581 1.2 jmcneill linkcfg &= ~0x80;
582 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
583 1.2 jmcneill sc->alc_rev == ATHEROS_AR8152_B_V10)
584 1.2 jmcneill linkcfg |= 0x80;
585 1.9 msaitoh CSR_WRITE_2(sc, sc->alc_expcap + PCIE_LCSR,
586 1.2 jmcneill linkcfg);
587 1.2 jmcneill pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
588 1.2 jmcneill PM_CFG_HOTRST);
589 1.2 jmcneill pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
590 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
591 1.2 jmcneill pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
592 1.2 jmcneill pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
593 1.2 jmcneill PM_CFG_PM_REQ_TIMER_SHIFT);
594 1.2 jmcneill pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
595 1.2 jmcneill }
596 1.2 jmcneill
597 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
598 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
599 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L0S_ENB;
600 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
601 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L1_ENB;
602 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
603 1.2 jmcneill if (sc->alc_ident->deviceid ==
604 1.2 jmcneill PCI_PRODUCT_ATTANSIC_AR8152_B)
605 1.2 jmcneill pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
606 1.2 jmcneill pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
607 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB |
608 1.2 jmcneill PM_CFG_SERDES_BUDS_RX_L1_ENB);
609 1.2 jmcneill pmcfg |= PM_CFG_CLK_SWH_L1;
610 1.2 jmcneill if (media == IFM_100_TX || media == IFM_1000_T) {
611 1.2 jmcneill pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
612 1.2 jmcneill switch (sc->alc_ident->deviceid) {
613 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
614 1.2 jmcneill pmcfg |= (7 <<
615 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
616 1.2 jmcneill break;
617 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
618 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
619 1.2 jmcneill pmcfg |= (4 <<
620 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
621 1.2 jmcneill break;
622 1.2 jmcneill default:
623 1.2 jmcneill pmcfg |= (15 <<
624 1.2 jmcneill PM_CFG_L1_ENTRY_TIMER_SHIFT);
625 1.2 jmcneill break;
626 1.2 jmcneill }
627 1.2 jmcneill }
628 1.2 jmcneill } else {
629 1.2 jmcneill pmcfg |= PM_CFG_SERDES_L1_ENB |
630 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB |
631 1.2 jmcneill PM_CFG_SERDES_BUDS_RX_L1_ENB;
632 1.2 jmcneill pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
633 1.2 jmcneill PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
634 1.2 jmcneill }
635 1.1 jmcneill } else {
636 1.2 jmcneill pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
637 1.2 jmcneill PM_CFG_SERDES_PLL_L1_ENB);
638 1.1 jmcneill pmcfg |= PM_CFG_CLK_SWH_L1;
639 1.2 jmcneill if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
640 1.2 jmcneill pmcfg |= PM_CFG_ASPM_L1_ENB;
641 1.1 jmcneill }
642 1.1 jmcneill CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
643 1.1 jmcneill }
644 1.1 jmcneill
645 1.1 jmcneill static void
646 1.1 jmcneill alc_attach(device_t parent, device_t self, void *aux)
647 1.1 jmcneill {
648 1.1 jmcneill
649 1.1 jmcneill struct alc_softc *sc = device_private(self);
650 1.1 jmcneill struct pci_attach_args *pa = aux;
651 1.1 jmcneill pci_chipset_tag_t pc = pa->pa_pc;
652 1.1 jmcneill pci_intr_handle_t ih;
653 1.1 jmcneill const char *intrstr;
654 1.1 jmcneill struct ifnet *ifp;
655 1.1 jmcneill pcireg_t memtype;
656 1.2 jmcneill const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
657 1.1 jmcneill uint16_t burst;
658 1.1 jmcneill int base, mii_flags, state, error = 0;
659 1.1 jmcneill uint32_t cap, ctl, val;
660 1.1 jmcneill
661 1.2 jmcneill sc->alc_ident = alc_find_ident(pa);
662 1.2 jmcneill
663 1.1 jmcneill aprint_naive("\n");
664 1.2 jmcneill aprint_normal(": %s\n", sc->alc_ident->name);
665 1.1 jmcneill
666 1.1 jmcneill sc->sc_dev = self;
667 1.1 jmcneill sc->sc_dmat = pa->pa_dmat;
668 1.1 jmcneill sc->sc_pct = pa->pa_pc;
669 1.1 jmcneill sc->sc_pcitag = pa->pa_tag;
670 1.1 jmcneill
671 1.1 jmcneill /*
672 1.1 jmcneill * Allocate IO memory
673 1.1 jmcneill */
674 1.1 jmcneill memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, ALC_PCIR_BAR);
675 1.1 jmcneill switch (memtype) {
676 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
677 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
678 1.1 jmcneill case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
679 1.1 jmcneill break;
680 1.1 jmcneill default:
681 1.1 jmcneill aprint_error_dev(self, "invalid base address register\n");
682 1.1 jmcneill break;
683 1.1 jmcneill }
684 1.1 jmcneill
685 1.1 jmcneill if (pci_mapreg_map(pa, ALC_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
686 1.1 jmcneill &sc->sc_mem_bh, NULL, &sc->sc_mem_size)) {
687 1.1 jmcneill aprint_error_dev(self, "could not map mem space\n");
688 1.1 jmcneill return;
689 1.1 jmcneill }
690 1.1 jmcneill
691 1.1 jmcneill if (pci_intr_map(pa, &ih) != 0) {
692 1.1 jmcneill printf(": can't map interrupt\n");
693 1.1 jmcneill goto fail;
694 1.1 jmcneill }
695 1.1 jmcneill
696 1.1 jmcneill /*
697 1.1 jmcneill * Allocate IRQ
698 1.1 jmcneill */
699 1.1 jmcneill intrstr = pci_intr_string(sc->sc_pct, ih);
700 1.1 jmcneill sc->sc_irq_handle = pci_intr_establish(pc, ih, IPL_NET, alc_intr, sc);
701 1.1 jmcneill if (sc->sc_irq_handle == NULL) {
702 1.1 jmcneill printf(": could not establish interrupt");
703 1.1 jmcneill if (intrstr != NULL)
704 1.1 jmcneill printf(" at %s", intrstr);
705 1.1 jmcneill printf("\n");
706 1.1 jmcneill goto fail;
707 1.1 jmcneill }
708 1.4 matt aprint_normal_dev(self, "interrupting at %s\n", intrstr);
709 1.8 christos
710 1.1 jmcneill /* Set PHY address. */
711 1.1 jmcneill sc->alc_phyaddr = ALC_PHY_ADDR;
712 1.1 jmcneill
713 1.1 jmcneill /* Initialize DMA parameters. */
714 1.1 jmcneill sc->alc_dma_rd_burst = 0;
715 1.1 jmcneill sc->alc_dma_wr_burst = 0;
716 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_64;
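	/*
	 * Default to 128-byte bursts and a 64-byte read completion
	 * boundary; these are refined below if a PCIe capability is found.
	 */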
717 1.1 jmcneill if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
718 1.1 jmcneill &base, NULL)) {
719 1.1 jmcneill sc->alc_flags |= ALC_FLAG_PCIE;
720 1.2 jmcneill sc->alc_expcap = base;
721 1.1 jmcneill burst = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
722 1.9 msaitoh base + PCIE_DCSR) >> 16;
723 1.1 jmcneill sc->alc_dma_rd_burst = (burst & 0x7000) >> 12;
724 1.1 jmcneill sc->alc_dma_wr_burst = (burst & 0x00e0) >> 5;
725 1.1 jmcneill if (alcdebug) {
726 1.1 jmcneill printf("%s: Read request size : %u bytes.\n",
727 1.8 christos device_xname(sc->sc_dev),
728 1.1 jmcneill alc_dma_burst[sc->alc_dma_rd_burst]);
729 1.1 jmcneill printf("%s: TLP payload size : %u bytes.\n",
730 1.1 jmcneill device_xname(sc->sc_dev),
731 1.1 jmcneill alc_dma_burst[sc->alc_dma_wr_burst]);
732 1.1 jmcneill }
733 1.1 jmcneill /* Clear data link and flow-control protocol error. */
734 1.1 jmcneill val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
735 1.1 jmcneill val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
736 1.1 jmcneill CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);
737 1.2 jmcneill CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
738 1.2 jmcneill CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
739 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
740 1.2 jmcneill CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
741 1.2 jmcneill PCIE_PHYMISC_FORCE_RCV_DET);
742 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B &&
743 1.2 jmcneill sc->alc_rev == ATHEROS_AR8152_B_V10) {
744 1.2 jmcneill val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
745 1.2 jmcneill val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
746 1.2 jmcneill PCIE_PHYMISC2_SERDES_TH_MASK);
747 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
748 1.2 jmcneill val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
749 1.2 jmcneill CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
750 1.2 jmcneill }
751 1.1 jmcneill /* Disable ASPM L0S and L1. */
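		/*
		 * Link Capabilities bits 11:10 advertise ASPM L0s/L1 support;
		 * Link Control bit 3 selects a 128-byte read completion
		 * boundary and bits 1:0 hold the ASPM enable state.
		 */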
752 1.1 jmcneill cap = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
753 1.9 msaitoh base + PCIE_LCAP) >> 16;
754 1.1 jmcneill if ((cap & 0x00000c00) != 0) {
755 1.1 jmcneill ctl = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
756 1.9 msaitoh base + PCIE_LCSR) >> 16;
757 1.1 jmcneill if ((ctl & 0x08) != 0)
758 1.1 jmcneill sc->alc_rcb = DMA_CFG_RCB_128;
759 1.1 jmcneill if (alcdebug)
760 1.1 jmcneill printf("%s: RCB %u bytes\n",
761 1.1 jmcneill device_xname(sc->sc_dev),
762 1.1 jmcneill sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
763 1.1 jmcneill state = ctl & 0x03;
764 1.2 jmcneill if (state & 0x01)
765 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L0S;
766 1.2 jmcneill if (state & 0x02)
767 1.2 jmcneill sc->alc_flags |= ALC_FLAG_L1S;
768 1.1 jmcneill if (alcdebug)
769 1.1 jmcneill printf("%s: ASPM %s %s\n",
770 1.1 jmcneill device_xname(sc->sc_dev),
771 1.1 jmcneill aspm_state[state],
772 1.1 jmcneill state == 0 ? "disabled" : "enabled");
773 1.2 jmcneill alc_disable_l0s_l1(sc);
774 1.2 jmcneill } else {
775 1.2 jmcneill aprint_debug_dev(sc->sc_dev, "no ASPM support\n");
776 1.1 jmcneill }
777 1.1 jmcneill }
778 1.1 jmcneill
779 1.1 jmcneill /* Reset PHY. */
780 1.1 jmcneill alc_phy_reset(sc);
781 1.1 jmcneill
782 1.1 jmcneill /* Reset the ethernet controller. */
783 1.1 jmcneill alc_reset(sc);
784 1.1 jmcneill
785 1.1 jmcneill /*
 786 1.1 jmcneill * One odd thing is that AR8132 uses the same PHY hardware (F1
 787 1.1 jmcneill * gigabit PHY) as AR8131, so atphy(4) on AR8132 reports that
 788 1.1 jmcneill * the PHY supports 1000Mbps, but that is not true. The PHY
 789 1.1 jmcneill * used in AR8132 cannot establish a gigabit link even though it
 790 1.1 jmcneill * reports the same PHY model/revision number as AR8131.
791 1.1 jmcneill */
792 1.2 jmcneill switch (sc->alc_ident->deviceid) {
793 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B:
794 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8152_B2:
795 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
796 1.2 jmcneill /* FALLTHROUGH */
797 1.1 jmcneill case PCI_PRODUCT_ATTANSIC_AR8132:
798 1.2 jmcneill sc->alc_flags |= ALC_FLAG_FASTETHER;
799 1.1 jmcneill break;
800 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151:
801 1.2 jmcneill case PCI_PRODUCT_ATTANSIC_AR8151_V2:
802 1.2 jmcneill sc->alc_flags |= ALC_FLAG_APS;
803 1.2 jmcneill /* FALLTHROUGH */
804 1.1 jmcneill default:
805 1.1 jmcneill break;
806 1.1 jmcneill }
807 1.2 jmcneill sc->alc_flags |= ALC_FLAG_JUMBO | ALC_FLAG_ASPM_MON;
808 1.1 jmcneill
809 1.1 jmcneill /*
 810 1.2 jmcneill * It seems that AR813x/AR815x has a silicon bug in the SMB block. In
 811 1.1 jmcneill * addition, Atheros said that enabling SMB wouldn't improve
 812 1.1 jmcneill * performance. However, having to read lots of
 813 1.1 jmcneill * registers just to extract MAC statistics is still unfortunate.
814 1.1 jmcneill */
815 1.1 jmcneill sc->alc_flags |= ALC_FLAG_SMB_BUG;
816 1.1 jmcneill /*
 817 1.1 jmcneill * Don't use Tx CMB. It is known to have a silicon bug.
818 1.1 jmcneill */
819 1.1 jmcneill sc->alc_flags |= ALC_FLAG_CMB_BUG;
820 1.1 jmcneill sc->alc_rev = PCI_REVISION(pa->pa_class);
821 1.1 jmcneill sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >>
822 1.1 jmcneill MASTER_CHIP_REV_SHIFT;
823 1.1 jmcneill if (alcdebug) {
824 1.1 jmcneill printf("%s: PCI device revision : 0x%04x\n",
825 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_rev);
826 1.1 jmcneill printf("%s: Chip id/revision : 0x%04x\n",
827 1.1 jmcneill device_xname(sc->sc_dev), sc->alc_chip_rev);
828 1.1 jmcneill printf("%s: %u Tx FIFO, %u Rx FIFO\n", device_xname(sc->sc_dev),
829 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8,
830 1.1 jmcneill CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8);
831 1.1 jmcneill }
832 1.1 jmcneill
833 1.1 jmcneill error = alc_dma_alloc(sc);
834 1.1 jmcneill if (error)
835 1.1 jmcneill goto fail;
836 1.1 jmcneill
837 1.1 jmcneill callout_init(&sc->sc_tick_ch, 0);
838 1.1 jmcneill callout_setfunc(&sc->sc_tick_ch, alc_tick, sc);
839 1.1 jmcneill
840 1.1 jmcneill /* Load station address. */
841 1.1 jmcneill alc_get_macaddr(sc);
842 1.1 jmcneill
843 1.1 jmcneill aprint_normal_dev(self, "Ethernet address %s\n",
844 1.1 jmcneill ether_sprintf(sc->alc_eaddr));
845 1.1 jmcneill
846 1.1 jmcneill ifp = &sc->sc_ec.ec_if;
847 1.1 jmcneill ifp->if_softc = sc;
848 1.1 jmcneill ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
849 1.1 jmcneill ifp->if_init = alc_init;
850 1.1 jmcneill ifp->if_ioctl = alc_ioctl;
851 1.1 jmcneill ifp->if_start = alc_start;
852 1.1 jmcneill ifp->if_stop = alc_stop;
853 1.1 jmcneill ifp->if_watchdog = alc_watchdog;
854 1.1 jmcneill ifp->if_baudrate = IF_Gbps(1);
855 1.1 jmcneill IFQ_SET_MAXLEN(&ifp->if_snd, ALC_TX_RING_CNT - 1);
856 1.1 jmcneill IFQ_SET_READY(&ifp->if_snd);
857 1.1 jmcneill strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
858 1.1 jmcneill
859 1.1 jmcneill sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;
860 1.1 jmcneill
861 1.1 jmcneill #ifdef ALC_CHECKSUM
862 1.1 jmcneill ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
863 1.1 jmcneill IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
 864 1.1 jmcneill IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
865 1.1 jmcneill #endif
866 1.1 jmcneill
867 1.1 jmcneill #if NVLAN > 0
868 1.1 jmcneill sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
869 1.1 jmcneill #endif
870 1.1 jmcneill
871 1.1 jmcneill /* Set up MII bus. */
872 1.1 jmcneill sc->sc_miibus.mii_ifp = ifp;
873 1.1 jmcneill sc->sc_miibus.mii_readreg = alc_miibus_readreg;
874 1.1 jmcneill sc->sc_miibus.mii_writereg = alc_miibus_writereg;
875 1.1 jmcneill sc->sc_miibus.mii_statchg = alc_miibus_statchg;
876 1.1 jmcneill
877 1.1 jmcneill sc->sc_ec.ec_mii = &sc->sc_miibus;
878 1.1 jmcneill ifmedia_init(&sc->sc_miibus.mii_media, 0, alc_mediachange,
879 1.1 jmcneill alc_mediastatus);
880 1.1 jmcneill mii_flags = 0;
881 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_JUMBO) != 0)
882 1.1 jmcneill mii_flags |= MIIF_DOPAUSE;
883 1.1 jmcneill mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
884 1.1 jmcneill MII_OFFSET_ANY, mii_flags);
885 1.1 jmcneill
886 1.1 jmcneill if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
887 1.1 jmcneill printf("%s: no PHY found!\n", device_xname(sc->sc_dev));
888 1.1 jmcneill ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
889 1.1 jmcneill 0, NULL);
890 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
891 1.8 christos } else
892 1.1 jmcneill ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);
893 1.1 jmcneill
894 1.1 jmcneill if_attach(ifp);
895 1.1 jmcneill ether_ifattach(ifp, sc->alc_eaddr);
896 1.1 jmcneill
897 1.1 jmcneill if (!pmf_device_register(self, NULL, NULL))
898 1.1 jmcneill aprint_error_dev(self, "couldn't establish power handler\n");
899 1.1 jmcneill else
900 1.1 jmcneill pmf_class_network_register(self, ifp);
901 1.1 jmcneill
902 1.1 jmcneill return;
903 1.1 jmcneill fail:
904 1.1 jmcneill alc_dma_free(sc);
905 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
906 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
907 1.1 jmcneill sc->sc_irq_handle = NULL;
908 1.1 jmcneill }
909 1.1 jmcneill if (sc->sc_mem_size) {
910 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
911 1.1 jmcneill sc->sc_mem_size = 0;
912 1.1 jmcneill }
913 1.1 jmcneill }
914 1.1 jmcneill
915 1.1 jmcneill static int
916 1.1 jmcneill alc_detach(device_t self, int flags)
917 1.1 jmcneill {
918 1.1 jmcneill struct alc_softc *sc = device_private(self);
919 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
920 1.1 jmcneill int s;
921 1.1 jmcneill
922 1.1 jmcneill s = splnet();
923 1.1 jmcneill alc_stop(ifp, 0);
924 1.1 jmcneill splx(s);
925 1.1 jmcneill
926 1.1 jmcneill mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);
927 1.1 jmcneill
928 1.1 jmcneill /* Delete all remaining media. */
929 1.1 jmcneill ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);
930 1.1 jmcneill
931 1.1 jmcneill ether_ifdetach(ifp);
932 1.1 jmcneill if_detach(ifp);
933 1.1 jmcneill alc_dma_free(sc);
934 1.1 jmcneill
935 1.1 jmcneill alc_phy_down(sc);
936 1.1 jmcneill if (sc->sc_irq_handle != NULL) {
937 1.1 jmcneill pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
938 1.1 jmcneill sc->sc_irq_handle = NULL;
939 1.1 jmcneill }
940 1.1 jmcneill if (sc->sc_mem_size) {
941 1.1 jmcneill bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
942 1.1 jmcneill sc->sc_mem_size = 0;
943 1.1 jmcneill }
944 1.1 jmcneill
945 1.1 jmcneill return (0);
946 1.1 jmcneill }
947 1.1 jmcneill
948 1.1 jmcneill static int
949 1.1 jmcneill alc_dma_alloc(struct alc_softc *sc)
950 1.1 jmcneill {
951 1.1 jmcneill struct alc_txdesc *txd;
952 1.1 jmcneill struct alc_rxdesc *rxd;
953 1.1 jmcneill int nsegs, error, i;
954 1.1 jmcneill
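	/*
	 * Each ring/block below goes through the same bus_dma sequence:
	 * bus_dmamap_create, bus_dmamem_alloc, bus_dmamem_map and
	 * bus_dmamap_load, recording the resulting physical address.
	 */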
955 1.1 jmcneill /*
956 1.1 jmcneill * Create DMA stuffs for TX ring
957 1.1 jmcneill */
958 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TX_RING_SZ, 1,
959 1.1 jmcneill ALC_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_tx_ring_map);
960 1.1 jmcneill if (error) {
961 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
962 1.1 jmcneill return (ENOBUFS);
963 1.1 jmcneill }
964 1.1 jmcneill
965 1.1 jmcneill /* Allocate DMA'able memory for TX ring */
966 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_TX_RING_SZ,
967 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_tx_ring_seg, 1,
968 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
969 1.1 jmcneill if (error) {
970 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Tx ring.\n",
971 1.1 jmcneill device_xname(sc->sc_dev));
972 1.1 jmcneill return error;
973 1.1 jmcneill }
974 1.1 jmcneill
975 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_tx_ring_seg,
976 1.1 jmcneill nsegs, ALC_TX_RING_SZ, (void **)&sc->alc_rdata.alc_tx_ring,
977 1.1 jmcneill BUS_DMA_NOWAIT);
978 1.1 jmcneill if (error)
979 1.1 jmcneill return (ENOBUFS);
980 1.1 jmcneill
981 1.1 jmcneill /* Load the DMA map for Tx ring. */
982 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map,
983 1.1 jmcneill sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
984 1.1 jmcneill if (error) {
985 1.1 jmcneill printf("%s: could not load DMA'able memory for Tx ring.\n",
986 1.1 jmcneill device_xname(sc->sc_dev));
987 1.8 christos bus_dmamem_free(sc->sc_dmat,
988 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
989 1.1 jmcneill return error;
990 1.1 jmcneill }
991 1.1 jmcneill
992 1.8 christos sc->alc_rdata.alc_tx_ring_paddr =
993 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_segs[0].ds_addr;
994 1.1 jmcneill
995 1.1 jmcneill /*
996 1.1 jmcneill * Create DMA stuffs for RX ring
997 1.1 jmcneill */
998 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_RX_RING_SZ, 1,
999 1.1 jmcneill ALC_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_ring_map);
1000 1.1 jmcneill if (error)
1001 1.1 jmcneill return (ENOBUFS);
1002 1.8 christos
1003 1.1 jmcneill /* Allocate DMA'able memory for RX ring */
1004 1.1 jmcneill error = bus_dmamem_alloc(sc->sc_dmat, ALC_RX_RING_SZ,
1005 1.1 jmcneill ETHER_ALIGN, 0, &sc->alc_rdata.alc_rx_ring_seg, 1,
1006 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1007 1.1 jmcneill if (error) {
1008 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx ring.\n",
1009 1.1 jmcneill device_xname(sc->sc_dev));
1010 1.1 jmcneill return error;
1011 1.1 jmcneill }
1012 1.1 jmcneill
1013 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rx_ring_seg,
1014 1.1 jmcneill nsegs, ALC_RX_RING_SZ, (void **)&sc->alc_rdata.alc_rx_ring,
1015 1.1 jmcneill BUS_DMA_NOWAIT);
1016 1.1 jmcneill if (error)
1017 1.1 jmcneill return (ENOBUFS);
1018 1.1 jmcneill
1019 1.1 jmcneill /* Load the DMA map for Rx ring. */
1020 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map,
1021 1.1 jmcneill sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
1022 1.1 jmcneill if (error) {
1023 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx ring.\n",
1024 1.1 jmcneill device_xname(sc->sc_dev));
1025 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1026 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1027 1.1 jmcneill return error;
1028 1.1 jmcneill }
1029 1.1 jmcneill
1030 1.1 jmcneill sc->alc_rdata.alc_rx_ring_paddr =
1031 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_segs[0].ds_addr;
1032 1.1 jmcneill
1033 1.1 jmcneill /*
1034 1.1 jmcneill * Create DMA stuffs for RX return ring
1035 1.1 jmcneill */
1036 1.8 christos error = bus_dmamap_create(sc->sc_dmat, ALC_RR_RING_SZ, 1,
1037 1.1 jmcneill ALC_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rr_ring_map);
1038 1.1 jmcneill if (error)
1039 1.1 jmcneill return (ENOBUFS);
1040 1.1 jmcneill
1041 1.1 jmcneill /* Allocate DMA'able memory for RX return ring */
1042 1.8 christos error = bus_dmamem_alloc(sc->sc_dmat, ALC_RR_RING_SZ,
1043 1.8 christos ETHER_ALIGN, 0, &sc->alc_rdata.alc_rr_ring_seg, 1,
1044 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1045 1.1 jmcneill if (error) {
1046 1.1 jmcneill printf("%s: could not allocate DMA'able memory for Rx "
1047 1.1 jmcneill "return ring.\n", device_xname(sc->sc_dev));
1048 1.1 jmcneill return error;
1049 1.1 jmcneill }
1050 1.1 jmcneill
1051 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_rr_ring_seg,
1052 1.1 jmcneill nsegs, ALC_RR_RING_SZ, (void **)&sc->alc_rdata.alc_rr_ring,
1053 1.1 jmcneill BUS_DMA_NOWAIT);
1054 1.1 jmcneill if (error)
1055 1.1 jmcneill return (ENOBUFS);
1056 1.1 jmcneill
1057 1.1 jmcneill /* Load the DMA map for Rx return ring. */
1058 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map,
1059 1.1 jmcneill sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
1060 1.1 jmcneill if (error) {
1061 1.1 jmcneill printf("%s: could not load DMA'able memory for Rx return ring."
1062 1.1 jmcneill "\n", device_xname(sc->sc_dev));
1063 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1064 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1065 1.1 jmcneill return error;
1066 1.1 jmcneill }
1067 1.1 jmcneill
1068 1.8 christos sc->alc_rdata.alc_rr_ring_paddr =
1069 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_segs[0].ds_addr;
1070 1.1 jmcneill
1071 1.1 jmcneill /*
1072 1.8 christos * Create DMA stuffs for CMB block
1073 1.1 jmcneill */
1074 1.8 christos error = bus_dmamap_create(sc->sc_dmat, ALC_CMB_SZ, 1,
1075 1.8 christos ALC_CMB_SZ, 0, BUS_DMA_NOWAIT,
1076 1.1 jmcneill &sc->alc_cdata.alc_cmb_map);
1077 1.8 christos if (error)
1078 1.1 jmcneill return (ENOBUFS);
1079 1.1 jmcneill
1080 1.1 jmcneill /* Allocate DMA'able memory for CMB block */
1081 1.8 christos error = bus_dmamem_alloc(sc->sc_dmat, ALC_CMB_SZ,
1082 1.8 christos ETHER_ALIGN, 0, &sc->alc_rdata.alc_cmb_seg, 1,
1083 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1084 1.1 jmcneill if (error) {
1085 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1086 1.1 jmcneill "CMB block\n", device_xname(sc->sc_dev));
1087 1.1 jmcneill return error;
1088 1.1 jmcneill }
1089 1.1 jmcneill
1090 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_cmb_seg,
1091 1.1 jmcneill nsegs, ALC_CMB_SZ, (void **)&sc->alc_rdata.alc_cmb,
1092 1.1 jmcneill BUS_DMA_NOWAIT);
1093 1.1 jmcneill if (error)
1094 1.1 jmcneill return (ENOBUFS);
1095 1.1 jmcneill
1096 1.1 jmcneill /* Load the DMA map for CMB block. */
1097 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_cmb_map,
1098 1.8 christos sc->alc_rdata.alc_cmb, ALC_CMB_SZ, NULL,
1099 1.1 jmcneill BUS_DMA_WAITOK);
1100 1.1 jmcneill if (error) {
1101 1.1 jmcneill printf("%s: could not load DMA'able memory for CMB block\n",
1102 1.1 jmcneill device_xname(sc->sc_dev));
1103 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1104 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1105 1.1 jmcneill return error;
1106 1.1 jmcneill }
1107 1.1 jmcneill
1108 1.8 christos sc->alc_rdata.alc_cmb_paddr =
1109 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_segs[0].ds_addr;
1110 1.1 jmcneill
1111 1.1 jmcneill /*
1112 1.1 jmcneill * Create DMA stuffs for SMB block
1113 1.1 jmcneill */
1114 1.8 christos error = bus_dmamap_create(sc->sc_dmat, ALC_SMB_SZ, 1,
1115 1.8 christos ALC_SMB_SZ, 0, BUS_DMA_NOWAIT,
1116 1.1 jmcneill &sc->alc_cdata.alc_smb_map);
1117 1.1 jmcneill if (error)
1118 1.1 jmcneill return (ENOBUFS);
1119 1.1 jmcneill
1120 1.1 jmcneill /* Allocate DMA'able memory for SMB block */
1121 1.8 christos error = bus_dmamem_alloc(sc->sc_dmat, ALC_SMB_SZ,
1122 1.8 christos ETHER_ALIGN, 0, &sc->alc_rdata.alc_smb_seg, 1,
1123 1.1 jmcneill &nsegs, BUS_DMA_NOWAIT);
1124 1.1 jmcneill if (error) {
1125 1.1 jmcneill printf("%s: could not allocate DMA'able memory for "
1126 1.1 jmcneill "SMB block\n", device_xname(sc->sc_dev));
1127 1.1 jmcneill return error;
1128 1.1 jmcneill }
1129 1.1 jmcneill
1130 1.1 jmcneill error = bus_dmamem_map(sc->sc_dmat, &sc->alc_rdata.alc_smb_seg,
1131 1.1 jmcneill nsegs, ALC_SMB_SZ, (void **)&sc->alc_rdata.alc_smb,
1132 1.1 jmcneill BUS_DMA_NOWAIT);
1133 1.1 jmcneill if (error)
1134 1.1 jmcneill return (ENOBUFS);
1135 1.1 jmcneill
1136 1.1 jmcneill /* Load the DMA map for SMB block */
1137 1.1 jmcneill error = bus_dmamap_load(sc->sc_dmat, sc->alc_cdata.alc_smb_map,
1138 1.8 christos sc->alc_rdata.alc_smb, ALC_SMB_SZ, NULL,
1139 1.1 jmcneill BUS_DMA_WAITOK);
1140 1.1 jmcneill if (error) {
1141 1.1 jmcneill printf("%s: could not load DMA'able memory for SMB block\n",
1142 1.1 jmcneill device_xname(sc->sc_dev));
1143 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1144 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1145 1.1 jmcneill return error;
1146 1.1 jmcneill }
1147 1.1 jmcneill
1148 1.8 christos sc->alc_rdata.alc_smb_paddr =
1149 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_segs[0].ds_addr;
1150 1.1 jmcneill
1151 1.1 jmcneill
1152 1.1 jmcneill /* Create DMA maps for Tx buffers. */
1153 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1154 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1155 1.1 jmcneill txd->tx_m = NULL;
1156 1.1 jmcneill txd->tx_dmamap = NULL;
1157 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, ALC_TSO_MAXSIZE,
1158 1.1 jmcneill ALC_MAXTXSEGS, ALC_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
1159 1.1 jmcneill &txd->tx_dmamap);
1160 1.1 jmcneill if (error) {
1161 1.1 jmcneill printf("%s: could not create Tx dmamap.\n",
1162 1.1 jmcneill device_xname(sc->sc_dev));
1163 1.1 jmcneill return error;
1164 1.1 jmcneill }
1165 1.1 jmcneill }
1166 1.1 jmcneill
1167 1.1 jmcneill /* Create DMA maps for Rx buffers. */
1168 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
1169 1.1 jmcneill BUS_DMA_NOWAIT, &sc->alc_cdata.alc_rx_sparemap);
1170 1.1 jmcneill if (error) {
1171 1.1 jmcneill printf("%s: could not create spare Rx dmamap.\n",
1172 1.1 jmcneill device_xname(sc->sc_dev));
1173 1.1 jmcneill return error;
1174 1.1 jmcneill }
1175 1.1 jmcneill
1176 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1177 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1178 1.1 jmcneill rxd->rx_m = NULL;
1179 1.1 jmcneill rxd->rx_dmamap = NULL;
1180 1.1 jmcneill error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1181 1.1 jmcneill MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
1182 1.1 jmcneill if (error) {
1183 1.1 jmcneill printf("%s: could not create Rx dmamap.\n",
1184 1.1 jmcneill device_xname(sc->sc_dev));
1185 1.1 jmcneill return error;
1186 1.1 jmcneill }
1187 1.1 jmcneill }
1188 1.1 jmcneill
1189 1.1 jmcneill return (0);
1190 1.1 jmcneill }
1191 1.1 jmcneill
1192 1.1 jmcneill
1193 1.1 jmcneill static void
1194 1.1 jmcneill alc_dma_free(struct alc_softc *sc)
1195 1.1 jmcneill {
1196 1.1 jmcneill struct alc_txdesc *txd;
1197 1.1 jmcneill struct alc_rxdesc *rxd;
1198 1.1 jmcneill int i;
1199 1.1 jmcneill
1200 1.1 jmcneill /* Tx buffers */
1201 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
1202 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
1203 1.1 jmcneill if (txd->tx_dmamap != NULL) {
1204 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
1205 1.1 jmcneill txd->tx_dmamap = NULL;
1206 1.1 jmcneill }
1207 1.1 jmcneill }
1208 1.1 jmcneill /* Rx buffers */
1209 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
1210 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
1211 1.1 jmcneill if (rxd->rx_dmamap != NULL) {
1212 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
1213 1.1 jmcneill rxd->rx_dmamap = NULL;
1214 1.1 jmcneill }
1215 1.1 jmcneill }
1216 1.1 jmcneill if (sc->alc_cdata.alc_rx_sparemap != NULL) {
1217 1.1 jmcneill bus_dmamap_destroy(sc->sc_dmat, sc->alc_cdata.alc_rx_sparemap);
1218 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = NULL;
1219 1.1 jmcneill }
1220 1.1 jmcneill
1221 1.1 jmcneill /* Tx ring. */
1222 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL)
1223 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map);
1224 1.1 jmcneill if (sc->alc_cdata.alc_tx_ring_map != NULL &&
1225 1.1 jmcneill sc->alc_rdata.alc_tx_ring != NULL)
1226 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1227 1.1 jmcneill &sc->alc_rdata.alc_tx_ring_seg, 1);
1228 1.1 jmcneill sc->alc_rdata.alc_tx_ring = NULL;
1229 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map = NULL;
1230 1.1 jmcneill
1231 1.1 jmcneill /* Rx ring. */
1232 1.8 christos if (sc->alc_cdata.alc_rx_ring_map != NULL)
1233 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map);
1234 1.1 jmcneill if (sc->alc_cdata.alc_rx_ring_map != NULL &&
1235 1.1 jmcneill sc->alc_rdata.alc_rx_ring != NULL)
1236 1.8 christos bus_dmamem_free(sc->sc_dmat,
1237 1.1 jmcneill &sc->alc_rdata.alc_rx_ring_seg, 1);
1238 1.1 jmcneill sc->alc_rdata.alc_rx_ring = NULL;
1239 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map = NULL;
1240 1.1 jmcneill
1241 1.1 jmcneill /* Rx return ring. */
1242 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL)
1243 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map);
1244 1.1 jmcneill if (sc->alc_cdata.alc_rr_ring_map != NULL &&
1245 1.1 jmcneill sc->alc_rdata.alc_rr_ring != NULL)
1246 1.8 christos bus_dmamem_free(sc->sc_dmat,
1247 1.1 jmcneill &sc->alc_rdata.alc_rr_ring_seg, 1);
1248 1.1 jmcneill sc->alc_rdata.alc_rr_ring = NULL;
1249 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map = NULL;
1250 1.1 jmcneill
1251 1.1 jmcneill /* CMB block */
1252 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL)
1253 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_cmb_map);
1254 1.1 jmcneill if (sc->alc_cdata.alc_cmb_map != NULL &&
1255 1.1 jmcneill sc->alc_rdata.alc_cmb != NULL)
1256 1.1 jmcneill bus_dmamem_free(sc->sc_dmat,
1257 1.1 jmcneill &sc->alc_rdata.alc_cmb_seg, 1);
1258 1.1 jmcneill sc->alc_rdata.alc_cmb = NULL;
1259 1.1 jmcneill sc->alc_cdata.alc_cmb_map = NULL;
1260 1.1 jmcneill
1261 1.1 jmcneill /* SMB block */
1262 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL)
1263 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, sc->alc_cdata.alc_smb_map);
1264 1.1 jmcneill if (sc->alc_cdata.alc_smb_map != NULL &&
1265 1.1 jmcneill sc->alc_rdata.alc_smb != NULL)
1266 1.8 christos bus_dmamem_free(sc->sc_dmat,
1267 1.1 jmcneill &sc->alc_rdata.alc_smb_seg, 1);
1268 1.1 jmcneill sc->alc_rdata.alc_smb = NULL;
1269 1.1 jmcneill sc->alc_cdata.alc_smb_map = NULL;
1270 1.1 jmcneill }
1271 1.1 jmcneill
1272 1.1 jmcneill static int
1273 1.1 jmcneill alc_encap(struct alc_softc *sc, struct mbuf **m_head)
1274 1.1 jmcneill {
1275 1.1 jmcneill struct alc_txdesc *txd, *txd_last;
1276 1.1 jmcneill struct tx_desc *desc;
1277 1.1 jmcneill struct mbuf *m;
1278 1.1 jmcneill bus_dmamap_t map;
1279 1.1 jmcneill uint32_t cflags, poff, vtag;
1280 1.1 jmcneill int error, idx, nsegs, prod;
1281 1.1 jmcneill #if NVLAN > 0
1282 1.1 jmcneill struct m_tag *mtag;
1283 1.1 jmcneill #endif
1284 1.1 jmcneill
1285 1.1 jmcneill m = *m_head;
1286 1.1 jmcneill cflags = vtag = 0;
1287 1.1 jmcneill poff = 0;
1288 1.1 jmcneill
1289 1.1 jmcneill prod = sc->alc_cdata.alc_tx_prod;
1290 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1291 1.1 jmcneill txd_last = txd;
1292 1.1 jmcneill map = txd->tx_dmamap;
1293 1.1 jmcneill
1294 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);
1295 1.1 jmcneill
1296 1.1 jmcneill if (error == EFBIG) {
1297 1.1 jmcneill error = 0;
1298 1.1 jmcneill
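		/*
		 * EFBIG means the mbuf chain maps to more DMA segments
		 * than the map allows.  m_pullup() is a cheap attempt to
		 * compact the chain before retrying the load.
		 */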
1299 1.1 jmcneill *m_head = m_pullup(*m_head, MHLEN);
1300 1.1 jmcneill if (*m_head == NULL) {
1301 1.1 jmcneill printf("%s: can't defrag TX mbuf\n",
1302 1.1 jmcneill device_xname(sc->sc_dev));
1303 1.1 jmcneill return ENOBUFS;
1304 1.1 jmcneill }
1305 1.1 jmcneill
1306 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
1307 1.1 jmcneill BUS_DMA_NOWAIT);
1308 1.1 jmcneill
1309 1.1 jmcneill if (error != 0) {
1310 1.1 jmcneill printf("%s: could not load defragged TX mbuf\n",
1311 1.1 jmcneill device_xname(sc->sc_dev));
1312 1.1 jmcneill m_freem(*m_head);
1313 1.1 jmcneill *m_head = NULL;
1314 1.1 jmcneill return error;
1315 1.1 jmcneill }
1316 1.1 jmcneill } else if (error) {
1317 1.1 jmcneill printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
1318 1.1 jmcneill return (error);
1319 1.1 jmcneill }
1320 1.1 jmcneill
1321 1.1 jmcneill nsegs = map->dm_nsegs;
1322 1.1 jmcneill
1323 1.1 jmcneill if (nsegs == 0) {
1324 1.1 jmcneill m_freem(*m_head);
1325 1.1 jmcneill *m_head = NULL;
1326 1.1 jmcneill return (EIO);
1327 1.1 jmcneill }
1328 1.1 jmcneill
1329 1.1 jmcneill /* Check descriptor overrun. */
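	/*
	 * The ring is treated as full while a few descriptors are still
	 * free, which keeps the producer index from ever catching up
	 * with the consumer index.
	 */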
1330 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) {
1331 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, map);
1332 1.1 jmcneill return (ENOBUFS);
1333 1.1 jmcneill }
1334 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1335 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1336 1.1 jmcneill
1337 1.1 jmcneill m = *m_head;
1338 1.1 jmcneill desc = NULL;
1339 1.1 jmcneill idx = 0;
1340 1.1 jmcneill #if NVLAN > 0
1341 1.1 jmcneill /* Configure VLAN hardware tag insertion. */
1342 1.1 jmcneill if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
1343 1.1 jmcneill vtag = htons(VLAN_TAG_VALUE(mtag));
1344 1.1 jmcneill vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK;
1345 1.1 jmcneill cflags |= TD_INS_VLAN_TAG;
1346 1.1 jmcneill }
1347 1.1 jmcneill #endif
1348 1.1 jmcneill /* Configure Tx checksum offload. */
1349 1.1 jmcneill if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) {
1350 1.1 jmcneill cflags |= TD_CUSTOM_CSUM;
1351 1.1 jmcneill /* Set checksum start offset. */
1352 1.1 jmcneill cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) &
1353 1.1 jmcneill TD_PLOAD_OFFSET_MASK;
1354 1.8 christos }
1355 1.1 jmcneill for (; idx < nsegs; idx++) {
1356 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1357 1.1 jmcneill desc->len =
1358 1.1 jmcneill htole32(TX_BYTES(map->dm_segs[idx].ds_len) | vtag);
1359 1.1 jmcneill desc->flags = htole32(cflags);
1360 1.1 jmcneill desc->addr = htole64(map->dm_segs[idx].ds_addr);
1361 1.1 jmcneill sc->alc_cdata.alc_tx_cnt++;
1362 1.1 jmcneill ALC_DESC_INC(prod, ALC_TX_RING_CNT);
1363 1.1 jmcneill }
1364 1.1 jmcneill /* Update producer index. */
1365 1.1 jmcneill sc->alc_cdata.alc_tx_prod = prod;
1366 1.1 jmcneill
1367 1.1 jmcneill /* Finally set EOP on the last descriptor. */
1368 1.1 jmcneill prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT;
1369 1.1 jmcneill desc = &sc->alc_rdata.alc_tx_ring[prod];
1370 1.1 jmcneill desc->flags |= htole32(TD_EOP);
1371 1.1 jmcneill
1372 1.1 jmcneill /* Swap dmamap of the first and the last. */
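	/*
	 * The DMA map was loaded through the first descriptor's slot,
	 * but the mbuf is recorded in the last slot (the one carrying
	 * EOP); swapping the two maps keeps the loaded map and the mbuf
	 * together so alc_txeof() can unload and free them from the
	 * same slot.
	 */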
1373 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[prod];
1374 1.1 jmcneill map = txd_last->tx_dmamap;
1375 1.1 jmcneill txd_last->tx_dmamap = txd->tx_dmamap;
1376 1.1 jmcneill txd->tx_dmamap = map;
1377 1.1 jmcneill txd->tx_m = m;
1378 1.1 jmcneill
1379 1.1 jmcneill return (0);
1380 1.1 jmcneill }
1381 1.1 jmcneill
1382 1.1 jmcneill static void
1383 1.1 jmcneill alc_start(struct ifnet *ifp)
1384 1.1 jmcneill {
1385 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1386 1.1 jmcneill struct mbuf *m_head;
1387 1.1 jmcneill int enq;
1388 1.1 jmcneill
1389 1.1 jmcneill if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1390 1.1 jmcneill return;
1391 1.1 jmcneill
1392 1.1 jmcneill /* Reclaim transmitted frames. */
1393 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT)
1394 1.1 jmcneill alc_txeof(sc);
1395 1.1 jmcneill
1396 1.1 jmcneill enq = 0;
1397 1.1 jmcneill for (;;) {
1398 1.1 jmcneill IFQ_DEQUEUE(&ifp->if_snd, m_head);
1399 1.1 jmcneill if (m_head == NULL)
1400 1.1 jmcneill break;
1401 1.1 jmcneill
1402 1.1 jmcneill /*
1403 1.1 jmcneill * Pack the data into the transmit ring. If we
1404 1.1 jmcneill * don't have room, set the OACTIVE flag and wait
1405 1.1 jmcneill * for the NIC to drain the ring.
1406 1.1 jmcneill */
1407 1.1 jmcneill if (alc_encap(sc, &m_head)) {
1408 1.1 jmcneill if (m_head == NULL)
1409 1.1 jmcneill break;
1410 1.1 jmcneill ifp->if_flags |= IFF_OACTIVE;
1411 1.1 jmcneill break;
1412 1.1 jmcneill }
1413 1.1 jmcneill enq = 1;
1414 1.8 christos
1415 1.1 jmcneill /*
1416 1.1 jmcneill * If there's a BPF listener, bounce a copy of this frame
1417 1.1 jmcneill 		 * to it.
1418 1.1 jmcneill */
1419 1.1 jmcneill bpf_mtap(ifp, m_head);
1420 1.1 jmcneill }
1421 1.1 jmcneill
1422 1.1 jmcneill if (enq) {
1423 1.1 jmcneill /* Sync descriptors. */
1424 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1425 1.8 christos sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1426 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1427 1.1 jmcneill /* Kick. Assume we're using normal Tx priority queue. */
1428 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX,
1429 1.1 jmcneill (sc->alc_cdata.alc_tx_prod <<
1430 1.1 jmcneill MBOX_TD_PROD_LO_IDX_SHIFT) &
1431 1.1 jmcneill MBOX_TD_PROD_LO_IDX_MASK);
1432 1.1 jmcneill /* Set a timeout in case the chip goes out to lunch. */
1433 1.1 jmcneill ifp->if_timer = ALC_TX_TIMEOUT;
1434 1.1 jmcneill }
1435 1.1 jmcneill }
1436 1.1 jmcneill
1437 1.1 jmcneill static void
1438 1.1 jmcneill alc_watchdog(struct ifnet *ifp)
1439 1.1 jmcneill {
1440 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1441 1.1 jmcneill
1442 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_LINK) == 0) {
1443 1.1 jmcneill printf("%s: watchdog timeout (missed link)\n",
1444 1.1 jmcneill device_xname(sc->sc_dev));
1445 1.1 jmcneill ifp->if_oerrors++;
1446 1.7 mrg alc_init_backend(ifp, false);
1447 1.1 jmcneill return;
1448 1.1 jmcneill }
1449 1.1 jmcneill
1450 1.1 jmcneill printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
1451 1.1 jmcneill ifp->if_oerrors++;
1452 1.7 mrg alc_init_backend(ifp, false);
1453 1.1 jmcneill
1454 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1455 1.1 jmcneill alc_start(ifp);
1456 1.1 jmcneill }
1457 1.1 jmcneill
1458 1.1 jmcneill static int
1459 1.1 jmcneill alc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1460 1.1 jmcneill {
1461 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
1462 1.1 jmcneill int s, error = 0;
1463 1.1 jmcneill
1464 1.1 jmcneill s = splnet();
1465 1.1 jmcneill
1466 1.1 jmcneill error = ether_ioctl(ifp, cmd, data);
1467 1.1 jmcneill if (error == ENETRESET) {
1468 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING)
1469 1.1 jmcneill alc_iff(sc);
1470 1.1 jmcneill error = 0;
1471 1.1 jmcneill }
1472 1.1 jmcneill
1473 1.1 jmcneill splx(s);
1474 1.1 jmcneill return (error);
1475 1.1 jmcneill }
1476 1.1 jmcneill
1477 1.1 jmcneill static void
1478 1.1 jmcneill alc_mac_config(struct alc_softc *sc)
1479 1.1 jmcneill {
1480 1.1 jmcneill struct mii_data *mii;
1481 1.1 jmcneill uint32_t reg;
1482 1.1 jmcneill
1483 1.1 jmcneill mii = &sc->sc_miibus;
1484 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
1485 1.1 jmcneill reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC |
1486 1.1 jmcneill MAC_CFG_SPEED_MASK);
1487 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
1488 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
1489 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
1490 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
1491 1.1 jmcneill /* Reprogram MAC with resolved speed/duplex. */
1492 1.1 jmcneill switch (IFM_SUBTYPE(mii->mii_media_active)) {
1493 1.1 jmcneill case IFM_10_T:
1494 1.1 jmcneill case IFM_100_TX:
1495 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
1496 1.1 jmcneill break;
1497 1.1 jmcneill case IFM_1000_T:
1498 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
1499 1.1 jmcneill break;
1500 1.1 jmcneill }
1501 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
1502 1.1 jmcneill reg |= MAC_CFG_FULL_DUPLEX;
1503 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
1504 1.1 jmcneill reg |= MAC_CFG_TX_FC;
1505 1.1 jmcneill if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
1506 1.1 jmcneill reg |= MAC_CFG_RX_FC;
1507 1.1 jmcneill }
1508 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
1509 1.1 jmcneill }
1510 1.1 jmcneill
1511 1.1 jmcneill static void
1512 1.1 jmcneill alc_stats_clear(struct alc_softc *sc)
1513 1.1 jmcneill {
1514 1.1 jmcneill struct smb sb, *smb;
1515 1.1 jmcneill uint32_t *reg;
1516 1.1 jmcneill int i;
1517 1.1 jmcneill
1518 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1519 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1520 1.8 christos sc->alc_cdata.alc_smb_map->dm_mapsize,
1521 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1522 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1523 1.1 jmcneill /* Update done, clear. */
1524 1.1 jmcneill smb->updated = 0;
1525 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1526 1.8 christos sc->alc_cdata.alc_smb_map->dm_mapsize,
1527 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1528 1.1 jmcneill } else {
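		/*
		 * Reading the MIB registers clears the counters, so the
		 * values read here are deliberately discarded.
		 */
		/* Read Rx statistics. */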
1529 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1530 1.1 jmcneill reg++) {
1531 1.1 jmcneill CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1532 1.1 jmcneill i += sizeof(uint32_t);
1533 1.1 jmcneill }
1534 1.1 jmcneill /* Read Tx statistics. */
1535 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1536 1.1 jmcneill reg++) {
1537 1.1 jmcneill CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1538 1.1 jmcneill i += sizeof(uint32_t);
1539 1.1 jmcneill }
1540 1.1 jmcneill }
1541 1.1 jmcneill }
1542 1.1 jmcneill
1543 1.1 jmcneill static void
1544 1.1 jmcneill alc_stats_update(struct alc_softc *sc)
1545 1.1 jmcneill {
1546 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1547 1.1 jmcneill struct alc_hw_stats *stat;
1548 1.1 jmcneill struct smb sb, *smb;
1549 1.1 jmcneill uint32_t *reg;
1550 1.1 jmcneill int i;
1551 1.1 jmcneill
1552 1.1 jmcneill stat = &sc->alc_stats;
1553 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1554 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1555 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize,
1556 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1557 1.1 jmcneill smb = sc->alc_rdata.alc_smb;
1558 1.1 jmcneill if (smb->updated == 0)
1559 1.1 jmcneill return;
1560 1.1 jmcneill } else {
1561 1.1 jmcneill smb = &sb;
1562 1.1 jmcneill /* Read Rx statistics. */
1563 1.1 jmcneill for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered;
1564 1.1 jmcneill reg++) {
1565 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i);
1566 1.1 jmcneill i += sizeof(uint32_t);
1567 1.1 jmcneill }
1568 1.1 jmcneill /* Read Tx statistics. */
1569 1.1 jmcneill for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes;
1570 1.1 jmcneill reg++) {
1571 1.1 jmcneill *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i);
1572 1.1 jmcneill i += sizeof(uint32_t);
1573 1.1 jmcneill }
1574 1.1 jmcneill }
1575 1.1 jmcneill
1576 1.1 jmcneill /* Rx stats. */
1577 1.1 jmcneill stat->rx_frames += smb->rx_frames;
1578 1.1 jmcneill stat->rx_bcast_frames += smb->rx_bcast_frames;
1579 1.1 jmcneill stat->rx_mcast_frames += smb->rx_mcast_frames;
1580 1.1 jmcneill stat->rx_pause_frames += smb->rx_pause_frames;
1581 1.1 jmcneill stat->rx_control_frames += smb->rx_control_frames;
1582 1.1 jmcneill stat->rx_crcerrs += smb->rx_crcerrs;
1583 1.1 jmcneill stat->rx_lenerrs += smb->rx_lenerrs;
1584 1.1 jmcneill stat->rx_bytes += smb->rx_bytes;
1585 1.1 jmcneill stat->rx_runts += smb->rx_runts;
1586 1.1 jmcneill stat->rx_fragments += smb->rx_fragments;
1587 1.1 jmcneill stat->rx_pkts_64 += smb->rx_pkts_64;
1588 1.1 jmcneill stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1589 1.1 jmcneill stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1590 1.1 jmcneill stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1591 1.1 jmcneill stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1592 1.1 jmcneill stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1593 1.1 jmcneill stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1594 1.1 jmcneill stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1595 1.1 jmcneill stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1596 1.1 jmcneill stat->rx_rrs_errs += smb->rx_rrs_errs;
1597 1.1 jmcneill stat->rx_alignerrs += smb->rx_alignerrs;
1598 1.1 jmcneill stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1599 1.1 jmcneill stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1600 1.1 jmcneill stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1601 1.1 jmcneill
1602 1.1 jmcneill /* Tx stats. */
1603 1.1 jmcneill stat->tx_frames += smb->tx_frames;
1604 1.1 jmcneill stat->tx_bcast_frames += smb->tx_bcast_frames;
1605 1.1 jmcneill stat->tx_mcast_frames += smb->tx_mcast_frames;
1606 1.1 jmcneill stat->tx_pause_frames += smb->tx_pause_frames;
1607 1.1 jmcneill stat->tx_excess_defer += smb->tx_excess_defer;
1608 1.1 jmcneill stat->tx_control_frames += smb->tx_control_frames;
1609 1.1 jmcneill stat->tx_deferred += smb->tx_deferred;
1610 1.1 jmcneill stat->tx_bytes += smb->tx_bytes;
1611 1.1 jmcneill stat->tx_pkts_64 += smb->tx_pkts_64;
1612 1.1 jmcneill stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
1613 1.1 jmcneill stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
1614 1.1 jmcneill stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
1615 1.1 jmcneill stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
1616 1.1 jmcneill stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
1617 1.1 jmcneill stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
1618 1.1 jmcneill stat->tx_single_colls += smb->tx_single_colls;
1619 1.1 jmcneill stat->tx_multi_colls += smb->tx_multi_colls;
1620 1.1 jmcneill stat->tx_late_colls += smb->tx_late_colls;
1621 1.1 jmcneill stat->tx_excess_colls += smb->tx_excess_colls;
1622 1.1 jmcneill stat->tx_abort += smb->tx_abort;
1623 1.1 jmcneill stat->tx_underrun += smb->tx_underrun;
1624 1.1 jmcneill stat->tx_desc_underrun += smb->tx_desc_underrun;
1625 1.1 jmcneill stat->tx_lenerrs += smb->tx_lenerrs;
1626 1.1 jmcneill stat->tx_pkts_truncated += smb->tx_pkts_truncated;
1627 1.1 jmcneill stat->tx_bcast_bytes += smb->tx_bcast_bytes;
1628 1.1 jmcneill stat->tx_mcast_bytes += smb->tx_mcast_bytes;
1629 1.1 jmcneill
1630 1.1 jmcneill /* Update counters in ifnet. */
1631 1.1 jmcneill ifp->if_opackets += smb->tx_frames;
1632 1.1 jmcneill
1633 1.1 jmcneill ifp->if_collisions += smb->tx_single_colls +
1634 1.1 jmcneill smb->tx_multi_colls * 2 + smb->tx_late_colls +
1635 1.1 jmcneill smb->tx_abort * HDPX_CFG_RETRY_DEFAULT;
1636 1.1 jmcneill
1637 1.1 jmcneill /*
1638 1.1 jmcneill * XXX
1639 1.1 jmcneill 	 * The tx_pkts_truncated counter looks suspicious. It constantly
1640 1.1 jmcneill 	 * increments with no sign of Tx errors. This may indicate that
1641 1.1 jmcneill 	 * the counter name is not correct, so it is not included in
1642 1.1 jmcneill 	 * the output error count.
1643 1.1 jmcneill */
1644 1.1 jmcneill ifp->if_oerrors += smb->tx_abort + smb->tx_late_colls +
1645 1.1 jmcneill smb->tx_underrun;
1646 1.1 jmcneill
1647 1.1 jmcneill ifp->if_ipackets += smb->rx_frames;
1648 1.1 jmcneill
1649 1.1 jmcneill ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
1650 1.1 jmcneill smb->rx_runts + smb->rx_pkts_truncated +
1651 1.1 jmcneill smb->rx_fifo_oflows + smb->rx_rrs_errs +
1652 1.1 jmcneill smb->rx_alignerrs;
1653 1.1 jmcneill
1654 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) {
1655 1.1 jmcneill /* Update done, clear. */
1656 1.1 jmcneill smb->updated = 0;
1657 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
1658 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1659 1.1 jmcneill }
1660 1.1 jmcneill }
1661 1.1 jmcneill
1662 1.1 jmcneill static int
1663 1.1 jmcneill alc_intr(void *arg)
1664 1.1 jmcneill {
1665 1.1 jmcneill struct alc_softc *sc = arg;
1666 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1667 1.1 jmcneill uint32_t status;
1668 1.1 jmcneill
1669 1.1 jmcneill status = CSR_READ_4(sc, ALC_INTR_STATUS);
1670 1.1 jmcneill if ((status & ALC_INTRS) == 0)
1671 1.1 jmcneill return (0);
1672 1.1 jmcneill
1673 1.1 jmcneill /* Acknowledge and disable interrupts. */
1674 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT);
1675 1.1 jmcneill
1676 1.1 jmcneill if (ifp->if_flags & IFF_RUNNING) {
1677 1.1 jmcneill if (status & INTR_RX_PKT) {
1678 1.1 jmcneill int error;
1679 1.1 jmcneill
1680 1.1 jmcneill error = alc_rxintr(sc);
1681 1.1 jmcneill if (error) {
1682 1.7 mrg alc_init_backend(ifp, false);
1683 1.1 jmcneill return (0);
1684 1.1 jmcneill }
1685 1.1 jmcneill }
1686 1.1 jmcneill
1687 1.1 jmcneill if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST |
1688 1.1 jmcneill INTR_TXQ_TO_RST)) {
1689 1.1 jmcneill if (status & INTR_DMA_RD_TO_RST)
1690 1.1 jmcneill printf("%s: DMA read error! -- resetting\n",
1691 1.1 jmcneill device_xname(sc->sc_dev));
1692 1.1 jmcneill if (status & INTR_DMA_WR_TO_RST)
1693 1.1 jmcneill printf("%s: DMA write error! -- resetting\n",
1694 1.1 jmcneill device_xname(sc->sc_dev));
1695 1.1 jmcneill if (status & INTR_TXQ_TO_RST)
1696 1.1 jmcneill printf("%s: TxQ reset! -- resetting\n",
1697 1.1 jmcneill device_xname(sc->sc_dev));
1698 1.7 mrg alc_init_backend(ifp, false);
1699 1.1 jmcneill return (0);
1700 1.1 jmcneill }
1701 1.1 jmcneill
1702 1.1 jmcneill alc_txeof(sc);
1703 1.1 jmcneill if (!IFQ_IS_EMPTY(&ifp->if_snd))
1704 1.1 jmcneill alc_start(ifp);
1705 1.1 jmcneill }
1706 1.1 jmcneill
1707 1.1 jmcneill /* Re-enable interrupts. */
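	/*
	 * Writing the status back with the INTR_DIS_INT bit clear both
	 * acknowledges the handled sources and re-arms interrupt
	 * generation, which was disabled at the top of the handler.
	 */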
1708 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF);
1709 1.1 jmcneill return (1);
1710 1.1 jmcneill }
1711 1.1 jmcneill
1712 1.1 jmcneill static void
1713 1.1 jmcneill alc_txeof(struct alc_softc *sc)
1714 1.1 jmcneill {
1715 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1716 1.1 jmcneill struct alc_txdesc *txd;
1717 1.1 jmcneill uint32_t cons, prod;
1718 1.1 jmcneill int prog;
1719 1.1 jmcneill
1720 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1721 1.1 jmcneill return;
1722 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
1723 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize,
1724 1.1 jmcneill BUS_DMASYNC_POSTREAD);
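	/*
	 * The hardware's Tx consumer index is taken from the coalescing
	 * message block (CMB) when the CMB is usable; otherwise it is
	 * read back from the mailbox register.
	 */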
1725 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) {
1726 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1727 1.8 christos sc->alc_cdata.alc_cmb_map->dm_mapsize,
1728 1.1 jmcneill BUS_DMASYNC_POSTREAD);
1729 1.1 jmcneill prod = sc->alc_rdata.alc_cmb->cons;
1730 1.1 jmcneill } else
1731 1.1 jmcneill prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX);
1732 1.1 jmcneill /* Assume we're using normal Tx priority queue. */
1733 1.1 jmcneill prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >>
1734 1.1 jmcneill MBOX_TD_CONS_LO_IDX_SHIFT;
1735 1.1 jmcneill cons = sc->alc_cdata.alc_tx_cons;
1736 1.1 jmcneill /*
1737 1.1 jmcneill * Go through our Tx list and free mbufs for those
1738 1.1 jmcneill * frames which have been transmitted.
1739 1.1 jmcneill */
1740 1.1 jmcneill for (prog = 0; cons != prod; prog++,
1741 1.1 jmcneill ALC_DESC_INC(cons, ALC_TX_RING_CNT)) {
1742 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt <= 0)
1743 1.1 jmcneill break;
1744 1.1 jmcneill prog++;
1745 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
1746 1.1 jmcneill sc->alc_cdata.alc_tx_cnt--;
1747 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[cons];
1748 1.1 jmcneill if (txd->tx_m != NULL) {
1749 1.1 jmcneill /* Reclaim transmitted mbufs. */
1750 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1751 1.1 jmcneill m_freem(txd->tx_m);
1752 1.1 jmcneill txd->tx_m = NULL;
1753 1.1 jmcneill }
1754 1.1 jmcneill }
1755 1.1 jmcneill
1756 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
1757 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
1758 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1759 1.1 jmcneill sc->alc_cdata.alc_tx_cons = cons;
1760 1.1 jmcneill /*
1761 1.1 jmcneill 	 * Unarm the watchdog timer only when there are no pending
1762 1.1 jmcneill 	 * frames in the Tx queue.
1763 1.1 jmcneill */
1764 1.1 jmcneill if (sc->alc_cdata.alc_tx_cnt == 0)
1765 1.1 jmcneill ifp->if_timer = 0;
1766 1.1 jmcneill }
1767 1.1 jmcneill
1768 1.1 jmcneill static int
1769 1.7 mrg alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd, bool init)
1770 1.1 jmcneill {
1771 1.1 jmcneill struct mbuf *m;
1772 1.1 jmcneill bus_dmamap_t map;
1773 1.1 jmcneill int error;
1774 1.1 jmcneill
1775 1.1 jmcneill MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
1776 1.1 jmcneill if (m == NULL)
1777 1.1 jmcneill return (ENOBUFS);
1778 1.1 jmcneill MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
1779 1.1 jmcneill if (!(m->m_flags & M_EXT)) {
1780 1.1 jmcneill m_freem(m);
1781 1.1 jmcneill return (ENOBUFS);
1782 1.1 jmcneill }
1783 1.1 jmcneill
1784 1.1 jmcneill m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
1785 1.1 jmcneill
1786 1.1 jmcneill error = bus_dmamap_load_mbuf(sc->sc_dmat,
1787 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap, m, BUS_DMA_NOWAIT);
1788 1.1 jmcneill
1789 1.1 jmcneill if (error != 0) {
1797 1.1 jmcneill m_freem(m);
1798 1.1 jmcneill
1799 1.1 jmcneill if (init)
1800 1.1 jmcneill printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
1801 1.1 jmcneill
1802 1.1 jmcneill return (error);
1803 1.1 jmcneill }
1804 1.1 jmcneill
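	/*
	 * The new mbuf was loaded into the spare map above, so the old
	 * buffer is torn down only after the load has succeeded; the
	 * descriptor's map and the spare map are then swapped.
	 */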
1805 1.1 jmcneill if (rxd->rx_m != NULL) {
1806 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
1807 1.1 jmcneill rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1808 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1809 1.1 jmcneill }
1810 1.1 jmcneill map = rxd->rx_dmamap;
1811 1.1 jmcneill rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap;
1812 1.1 jmcneill sc->alc_cdata.alc_rx_sparemap = map;
1813 1.1 jmcneill rxd->rx_m = m;
1814 1.1 jmcneill rxd->rx_desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
1815 1.1 jmcneill return (0);
1816 1.1 jmcneill }
1817 1.1 jmcneill
1818 1.1 jmcneill static int
1819 1.1 jmcneill alc_rxintr(struct alc_softc *sc)
1820 1.1 jmcneill {
1821 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1822 1.1 jmcneill struct rx_rdesc *rrd;
1823 1.1 jmcneill uint32_t nsegs, status;
1824 1.1 jmcneill int rr_cons, prog;
1825 1.1 jmcneill
1826 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1827 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1828 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1829 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1830 1.1 jmcneill rr_cons = sc->alc_cdata.alc_rr_cons;
1831 1.1 jmcneill for (prog = 0; (ifp->if_flags & IFF_RUNNING) != 0;) {
1832 1.1 jmcneill rrd = &sc->alc_rdata.alc_rr_ring[rr_cons];
1833 1.1 jmcneill status = le32toh(rrd->status);
1834 1.1 jmcneill if ((status & RRD_VALID) == 0)
1835 1.1 jmcneill break;
1836 1.1 jmcneill nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo));
1837 1.1 jmcneill if (nsegs == 0) {
1838 1.1 jmcneill /* This should not happen! */
1839 1.1 jmcneill if (alcdebug)
1840 1.1 jmcneill printf("%s: unexpected segment count -- "
1841 1.1 jmcneill "resetting\n", device_xname(sc->sc_dev));
1842 1.1 jmcneill return (EIO);
1843 1.1 jmcneill }
1844 1.1 jmcneill alc_rxeof(sc, rrd);
1845 1.1 jmcneill /* Clear Rx return status. */
1846 1.1 jmcneill rrd->status = 0;
1847 1.1 jmcneill ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT);
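		/*
		 * alc_rx_cons tracks the Rx free descriptors handed back
		 * to the controller and is written to the producer
		 * mailbox below; advance it by the number of fragments
		 * this frame consumed.
		 */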
1848 1.1 jmcneill sc->alc_cdata.alc_rx_cons += nsegs;
1849 1.1 jmcneill sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT;
1850 1.1 jmcneill prog += nsegs;
1851 1.1 jmcneill }
1852 1.1 jmcneill
1853 1.1 jmcneill if (prog > 0) {
1854 1.1 jmcneill /* Update the consumer index. */
1855 1.1 jmcneill sc->alc_cdata.alc_rr_cons = rr_cons;
1856 1.1 jmcneill /* Sync Rx return descriptors. */
1857 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
1858 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize,
1859 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1860 1.1 jmcneill /*
1861 1.1 jmcneill 		 * Sync updated Rx descriptors so the controller sees the
1862 1.1 jmcneill 		 * modified buffer addresses.
1863 1.1 jmcneill */
1864 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
1865 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize,
1866 1.1 jmcneill BUS_DMASYNC_PREWRITE);
1867 1.1 jmcneill /*
1868 1.1 jmcneill * Let controller know availability of new Rx buffers.
1869 1.1 jmcneill 		 * Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT descriptors,
1870 1.1 jmcneill 		 * it may be possible to update ALC_MBOX_RD0_PROD_IDX
1871 1.1 jmcneill 		 * only when Rx buffer pre-fetching is required. In
1872 1.1 jmcneill 		 * addition, ALC_RX_RD_FREE_THRESH is already set to
1873 1.1 jmcneill 		 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However,
1874 1.1 jmcneill 		 * pre-fetching still seems to need more
1875 1.1 jmcneill 		 * experimentation.
1876 1.1 jmcneill */
1877 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX,
1878 1.1 jmcneill sc->alc_cdata.alc_rx_cons);
1879 1.1 jmcneill }
1880 1.1 jmcneill
1881 1.1 jmcneill return (0);
1882 1.1 jmcneill }
1883 1.1 jmcneill
1884 1.1 jmcneill /* Receive a frame. */
1885 1.1 jmcneill static void
1886 1.1 jmcneill alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd)
1887 1.1 jmcneill {
1888 1.1 jmcneill struct ifnet *ifp = &sc->sc_ec.ec_if;
1889 1.1 jmcneill struct alc_rxdesc *rxd;
1890 1.1 jmcneill struct mbuf *mp, *m;
1891 1.1 jmcneill uint32_t rdinfo, status;
1892 1.1 jmcneill int count, nsegs, rx_cons;
1893 1.1 jmcneill
1894 1.1 jmcneill status = le32toh(rrd->status);
1895 1.1 jmcneill rdinfo = le32toh(rrd->rdinfo);
1896 1.1 jmcneill rx_cons = RRD_RD_IDX(rdinfo);
1897 1.1 jmcneill nsegs = RRD_RD_CNT(rdinfo);
1898 1.1 jmcneill
1899 1.1 jmcneill sc->alc_cdata.alc_rxlen = RRD_BYTES(status);
1900 1.1 jmcneill if (status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) {
1901 1.1 jmcneill /*
1902 1.1 jmcneill 		 * We want to pass the following frames to the upper
1903 1.1 jmcneill 		 * layer regardless of the error status of the Rx
1904 1.1 jmcneill 		 * return ring:
1905 1.1 jmcneill 		 *
1906 1.1 jmcneill 		 * o the IP/TCP/UDP checksum is bad.
1907 1.1 jmcneill 		 * o the frame length and the protocol-specific length
1908 1.1 jmcneill 		 *   do not match.
1909 1.1 jmcneill 		 *
1910 1.1 jmcneill 		 * Force the network stack to compute the checksum for
1911 1.1 jmcneill 		 * these errored frames.
1912 1.1 jmcneill */
1913 1.1 jmcneill status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK;
1914 1.2 jmcneill if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN |
1915 1.2 jmcneill RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0)
1916 1.1 jmcneill return;
1917 1.1 jmcneill }
1918 1.1 jmcneill
1919 1.1 jmcneill for (count = 0; count < nsegs; count++,
1920 1.1 jmcneill ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) {
1921 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[rx_cons];
1922 1.1 jmcneill mp = rxd->rx_m;
1923 1.1 jmcneill /* Add a new receive buffer to the ring. */
1924 1.7 mrg if (alc_newbuf(sc, rxd, false) != 0) {
1925 1.1 jmcneill ifp->if_iqdrops++;
1926 1.1 jmcneill /* Reuse Rx buffers. */
1927 1.1 jmcneill if (sc->alc_cdata.alc_rxhead != NULL)
1928 1.1 jmcneill m_freem(sc->alc_cdata.alc_rxhead);
1929 1.1 jmcneill break;
1930 1.1 jmcneill }
1931 1.1 jmcneill
1932 1.1 jmcneill /*
1933 1.1 jmcneill 		 * Assume we've received a full-sized frame.
1934 1.1 jmcneill 		 * The actual size is fixed up when we encounter the
1935 1.1 jmcneill 		 * end of a multi-segmented frame.
1936 1.1 jmcneill */
1937 1.1 jmcneill mp->m_len = sc->alc_buf_size;
1938 1.1 jmcneill
1939 1.1 jmcneill /* Chain received mbufs. */
1940 1.1 jmcneill if (sc->alc_cdata.alc_rxhead == NULL) {
1941 1.1 jmcneill sc->alc_cdata.alc_rxhead = mp;
1942 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1943 1.1 jmcneill } else {
1944 1.1 jmcneill mp->m_flags &= ~M_PKTHDR;
1945 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail =
1946 1.1 jmcneill sc->alc_cdata.alc_rxtail;
1947 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = mp;
1948 1.1 jmcneill sc->alc_cdata.alc_rxtail = mp;
1949 1.1 jmcneill }
1950 1.1 jmcneill
1951 1.1 jmcneill if (count == nsegs - 1) {
1952 1.1 jmcneill /* Last desc. for this frame. */
1953 1.1 jmcneill m = sc->alc_cdata.alc_rxhead;
1954 1.1 jmcneill m->m_flags |= M_PKTHDR;
1955 1.1 jmcneill /*
1956 1.1 jmcneill 			 * It seems that the L1C/L2C controller has no way
1957 1.1 jmcneill 			 * to tell the hardware to strip the CRC bytes.
1958 1.1 jmcneill */
1959 1.1 jmcneill m->m_pkthdr.len =
1960 1.1 jmcneill sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN;
1961 1.1 jmcneill if (nsegs > 1) {
1962 1.1 jmcneill /* Set last mbuf size. */
1963 1.1 jmcneill mp->m_len = sc->alc_cdata.alc_rxlen -
1964 1.1 jmcneill (nsegs - 1) * sc->alc_buf_size;
1965 1.1 jmcneill /* Remove the CRC bytes in chained mbufs. */
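				/*
				 * If the last fragment holds nothing but
				 * (part of) the CRC, drop it entirely and
				 * trim the remainder from the previous
				 * mbuf; otherwise just shorten the last
				 * mbuf.
				 */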
1966 1.1 jmcneill if (mp->m_len <= ETHER_CRC_LEN) {
1967 1.1 jmcneill sc->alc_cdata.alc_rxtail =
1968 1.1 jmcneill sc->alc_cdata.alc_rxprev_tail;
1969 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_len -=
1970 1.1 jmcneill (ETHER_CRC_LEN - mp->m_len);
1971 1.1 jmcneill sc->alc_cdata.alc_rxtail->m_next = NULL;
1972 1.1 jmcneill m_freem(mp);
1973 1.1 jmcneill } else {
1974 1.1 jmcneill mp->m_len -= ETHER_CRC_LEN;
1975 1.1 jmcneill }
1976 1.1 jmcneill } else
1977 1.1 jmcneill m->m_len = m->m_pkthdr.len;
1978 1.1 jmcneill m->m_pkthdr.rcvif = ifp;
1979 1.1 jmcneill #if NVLAN > 0
1980 1.1 jmcneill /*
1981 1.1 jmcneill * Due to hardware bugs, Rx checksum offloading
1982 1.1 jmcneill * was intentionally disabled.
1983 1.1 jmcneill */
1984 1.1 jmcneill if (status & RRD_VLAN_TAG) {
1985 1.1 jmcneill u_int32_t vtag = RRD_VLAN(le32toh(rrd->vtag));
1986 1.1 jmcneill VLAN_INPUT_TAG(ifp, m, ntohs(vtag), );
1987 1.1 jmcneill }
1988 1.1 jmcneill #endif
1989 1.1 jmcneill
1990 1.1 jmcneill bpf_mtap(ifp, m);
1991 1.1 jmcneill
1992 1.1 jmcneill /* Pass it on. */
1993 1.10 christos (*ifp->if_input)(ifp, m);
1994 1.1 jmcneill }
1995 1.1 jmcneill }
1996 1.1 jmcneill /* Reset mbuf chains. */
1997 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
1998 1.1 jmcneill }
1999 1.1 jmcneill
2000 1.1 jmcneill static void
2001 1.1 jmcneill alc_tick(void *xsc)
2002 1.1 jmcneill {
2003 1.1 jmcneill struct alc_softc *sc = xsc;
2004 1.1 jmcneill struct mii_data *mii = &sc->sc_miibus;
2005 1.1 jmcneill int s;
2006 1.1 jmcneill
2007 1.1 jmcneill s = splnet();
2008 1.1 jmcneill mii_tick(mii);
2009 1.1 jmcneill alc_stats_update(sc);
2010 1.1 jmcneill splx(s);
2011 1.1 jmcneill
2012 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2013 1.1 jmcneill }
2014 1.1 jmcneill
2015 1.1 jmcneill static void
2016 1.1 jmcneill alc_reset(struct alc_softc *sc)
2017 1.1 jmcneill {
2018 1.1 jmcneill uint32_t reg;
2019 1.1 jmcneill int i;
2020 1.1 jmcneill
2021 1.2 jmcneill reg = CSR_READ_4(sc, ALC_MASTER_CFG) & 0xFFFF;
2022 1.2 jmcneill reg |= MASTER_OOB_DIS_OFF | MASTER_RESET;
2023 1.2 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2024 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2025 1.1 jmcneill DELAY(10);
2026 1.1 jmcneill if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0)
2027 1.1 jmcneill break;
2028 1.1 jmcneill }
2029 1.1 jmcneill if (i == 0)
2030 1.1 jmcneill printf("%s: master reset timeout!\n", device_xname(sc->sc_dev));
2031 1.1 jmcneill
2032 1.1 jmcneill for (i = ALC_RESET_TIMEOUT; i > 0; i--) {
2033 1.1 jmcneill if ((reg = CSR_READ_4(sc, ALC_IDLE_STATUS)) == 0)
2034 1.1 jmcneill break;
2035 1.1 jmcneill DELAY(10);
2036 1.1 jmcneill }
2037 1.1 jmcneill
2038 1.1 jmcneill if (i == 0)
2039 1.8 christos printf("%s: reset timeout(0x%08x)!\n", device_xname(sc->sc_dev),
2040 1.1 jmcneill reg);
2041 1.1 jmcneill }
2042 1.1 jmcneill
2043 1.1 jmcneill static int
2044 1.1 jmcneill alc_init(struct ifnet *ifp)
2045 1.1 jmcneill {
2046 1.8 christos
2047 1.7 mrg return alc_init_backend(ifp, true);
2048 1.7 mrg }
2049 1.7 mrg
2050 1.7 mrg static int
2051 1.7 mrg alc_init_backend(struct ifnet *ifp, bool init)
2052 1.7 mrg {
2053 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
2054 1.1 jmcneill struct mii_data *mii;
2055 1.1 jmcneill uint8_t eaddr[ETHER_ADDR_LEN];
2056 1.1 jmcneill bus_addr_t paddr;
2057 1.1 jmcneill uint32_t reg, rxf_hi, rxf_lo;
2058 1.1 jmcneill int error;
2059 1.1 jmcneill
2060 1.1 jmcneill /*
2061 1.1 jmcneill * Cancel any pending I/O.
2062 1.1 jmcneill */
2063 1.1 jmcneill alc_stop(ifp, 0);
2064 1.1 jmcneill /*
2065 1.1 jmcneill * Reset the chip to a known state.
2066 1.1 jmcneill */
2067 1.1 jmcneill alc_reset(sc);
2068 1.1 jmcneill
2069 1.1 jmcneill /* Initialize Rx descriptors. */
2070 1.7 mrg error = alc_init_rx_ring(sc, init);
2071 1.1 jmcneill if (error != 0) {
2072 1.1 jmcneill printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
2073 1.1 jmcneill alc_stop(ifp, 0);
2074 1.1 jmcneill return (error);
2075 1.1 jmcneill }
2076 1.1 jmcneill alc_init_rr_ring(sc);
2077 1.1 jmcneill alc_init_tx_ring(sc);
2078 1.1 jmcneill alc_init_cmb(sc);
2079 1.1 jmcneill alc_init_smb(sc);
2080 1.1 jmcneill
2081 1.2 jmcneill /* Enable all clocks. */
2082 1.2 jmcneill CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0);
2083 1.2 jmcneill
2084 1.1 jmcneill /* Reprogram the station address. */
2085 1.1 jmcneill memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
2086 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR0,
2087 1.1 jmcneill eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2088 1.1 jmcneill CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]);
2089 1.1 jmcneill /*
2090 1.1 jmcneill 	 * Clear the WOL status and disable all WOL features, as WOL
2091 1.1 jmcneill 	 * would interfere with Rx operation under normal environments.
2092 1.1 jmcneill */
2093 1.1 jmcneill CSR_READ_4(sc, ALC_WOL_CFG);
2094 1.1 jmcneill CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
2095 1.1 jmcneill /* Set Tx descriptor base addresses. */
2096 1.1 jmcneill paddr = sc->alc_rdata.alc_tx_ring_paddr;
2097 1.1 jmcneill CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2098 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2099 1.1 jmcneill /* We don't use high priority ring. */
2100 1.1 jmcneill CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0);
2101 1.1 jmcneill /* Set Tx descriptor counter. */
2102 1.1 jmcneill CSR_WRITE_4(sc, ALC_TD_RING_CNT,
2103 1.1 jmcneill (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK);
2104 1.1 jmcneill /* Set Rx descriptor base addresses. */
2105 1.1 jmcneill paddr = sc->alc_rdata.alc_rx_ring_paddr;
2106 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2107 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2108 1.1 jmcneill /* We use one Rx ring. */
2109 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0);
2110 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0);
2111 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0);
2112 1.1 jmcneill /* Set Rx descriptor counter. */
2113 1.1 jmcneill CSR_WRITE_4(sc, ALC_RD_RING_CNT,
2114 1.1 jmcneill (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK);
2115 1.1 jmcneill
2116 1.1 jmcneill /*
2117 1.1 jmcneill 	 * Let the hardware split jumbo frames into alc_max_buf_sized
2118 1.1 jmcneill 	 * chunks if they do not fit the buffer size. The Rx return
2119 1.1 jmcneill 	 * descriptor holds a counter that indicates how many fragments
2120 1.1 jmcneill 	 * were made by the hardware. The buffer size should be a
2121 1.1 jmcneill 	 * multiple of 8 bytes. Since the hardware limits the buffer
2122 1.1 jmcneill 	 * size, always use the maximum value.
2123 1.1 jmcneill 	 * For strict-alignment architectures make sure to reduce the
2124 1.1 jmcneill 	 * buffer size by 8 bytes to make room for the alignment fixup.
2125 1.1 jmcneill */
2126 1.1 jmcneill sc->alc_buf_size = RX_BUF_SIZE_MAX;
2127 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size);
2128 1.1 jmcneill
2129 1.1 jmcneill paddr = sc->alc_rdata.alc_rr_ring_paddr;
2130 1.1 jmcneill /* Set Rx return descriptor base addresses. */
2131 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr));
2132 1.1 jmcneill /* We use one Rx return ring. */
2133 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0);
2134 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0);
2135 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0);
2136 1.1 jmcneill /* Set Rx return descriptor counter. */
2137 1.1 jmcneill CSR_WRITE_4(sc, ALC_RRD_RING_CNT,
2138 1.1 jmcneill (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK);
2139 1.1 jmcneill paddr = sc->alc_rdata.alc_cmb_paddr;
2140 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2141 1.1 jmcneill paddr = sc->alc_rdata.alc_smb_paddr;
2142 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr));
2143 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr));
2144 1.1 jmcneill
2145 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B) {
2146 1.2 jmcneill /* Reconfigure SRAM - Vendor magic. */
2147 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0);
2148 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100);
2149 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000);
2150 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0);
2151 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0);
2152 1.2 jmcneill CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0);
2153 1.2 jmcneill CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000);
2154 1.2 jmcneill CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000);
2155 1.2 jmcneill }
2156 1.2 jmcneill
2157 1.1 jmcneill /* Tell hardware that we're ready to load DMA blocks. */
2158 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD);
2159 1.1 jmcneill
2160 1.1 jmcneill /* Configure interrupt moderation timer. */
2161 1.1 jmcneill sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT;
2162 1.1 jmcneill sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT;
2163 1.1 jmcneill reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT;
2164 1.1 jmcneill reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT;
2165 1.1 jmcneill CSR_WRITE_4(sc, ALC_IM_TIMER, reg);
2166 1.1 jmcneill /*
2167 1.1 jmcneill 	 * We don't want automatic interrupt clearing, as the
2168 1.1 jmcneill 	 * interrupt handler should see the raw interrupt status.
2169 1.1 jmcneill */
2170 1.2 jmcneill reg = MASTER_SA_TIMER_ENB;
2171 1.1 jmcneill if (ALC_USECS(sc->alc_int_rx_mod) != 0)
2172 1.1 jmcneill reg |= MASTER_IM_RX_TIMER_ENB;
2173 1.1 jmcneill if (ALC_USECS(sc->alc_int_tx_mod) != 0)
2174 1.1 jmcneill reg |= MASTER_IM_TX_TIMER_ENB;
2175 1.1 jmcneill CSR_WRITE_4(sc, ALC_MASTER_CFG, reg);
2176 1.1 jmcneill /*
2177 1.1 jmcneill * Disable interrupt re-trigger timer. We don't want automatic
2178 1.1 jmcneill * re-triggering of un-ACKed interrupts.
2179 1.1 jmcneill */
2180 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0));
2181 1.1 jmcneill /* Configure CMB. */
2182 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4);
2183 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2184 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000));
2185 1.1 jmcneill else
2186 1.1 jmcneill CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0));
2187 1.1 jmcneill /*
2188 1.1 jmcneill 	 * Hardware can be configured to issue an SMB interrupt based
2189 1.1 jmcneill 	 * on a programmed interval. Since the driver already runs a
2190 1.1 jmcneill 	 * callout once per second, use that instead of relying on
2191 1.1 jmcneill 	 * the periodic SMB interrupt.
2192 1.1 jmcneill */
2193 1.1 jmcneill CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0));
2194 1.1 jmcneill /* Clear MAC statistics. */
2195 1.1 jmcneill alc_stats_clear(sc);
2196 1.1 jmcneill
2197 1.1 jmcneill /*
2198 1.1 jmcneill 	 * Always use the maximum frame size the controller can support.
2199 1.1 jmcneill 	 * Otherwise received frames with a larger frame length
2200 1.1 jmcneill 	 * than the alc(4) MTU would be silently dropped in hardware. This
2201 1.1 jmcneill 	 * would make path-MTU discovery hard, as the sender wouldn't get
2202 1.1 jmcneill 	 * any responses from the receiver. alc(4) supports
2203 1.1 jmcneill 	 * multi-fragmented frames on the Rx path, so it has no problem
2204 1.1 jmcneill 	 * assembling fragmented frames. Using the maximum frame size also
2205 1.1 jmcneill 	 * removes the need to reinitialize the hardware when the interface
2206 1.1 jmcneill 	 * MTU configuration is changed.
2207 1.1 jmcneill *
2208 1.1 jmcneill * Be conservative in what you do, be liberal in what you
2209 1.1 jmcneill * accept from others - RFC 793.
2210 1.1 jmcneill */
2211 1.2 jmcneill CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen);
2212 1.1 jmcneill
2213 1.1 jmcneill /* Disable header split(?) */
2214 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDS_CFG, 0);
2215 1.1 jmcneill
2216 1.1 jmcneill /* Configure IPG/IFG parameters. */
2217 1.1 jmcneill CSR_WRITE_4(sc, ALC_IPG_IFG_CFG,
2218 1.1 jmcneill ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) |
2219 1.1 jmcneill ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2220 1.1 jmcneill ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2221 1.1 jmcneill ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK));
2222 1.1 jmcneill /* Set parameters for half-duplex media. */
2223 1.1 jmcneill CSR_WRITE_4(sc, ALC_HDPX_CFG,
2224 1.1 jmcneill ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2225 1.1 jmcneill HDPX_CFG_LCOL_MASK) |
2226 1.1 jmcneill ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2227 1.1 jmcneill HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2228 1.1 jmcneill ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2229 1.1 jmcneill HDPX_CFG_ABEBT_MASK) |
2230 1.1 jmcneill ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2231 1.1 jmcneill HDPX_CFG_JAMIPG_MASK));
2232 1.1 jmcneill /*
2233 1.1 jmcneill 	 * Set the TSO/checksum offload threshold. For frames that are
2234 1.1 jmcneill 	 * larger than this threshold, the hardware will not perform
2235 1.1 jmcneill 	 * TSO/checksum offloading.
2236 1.1 jmcneill */
2237 1.1 jmcneill CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH,
2238 1.2 jmcneill (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) &
2239 1.1 jmcneill TSO_OFFLOAD_THRESH_MASK);
2240 1.1 jmcneill /* Configure TxQ. */
2241 1.1 jmcneill reg = (alc_dma_burst[sc->alc_dma_rd_burst] <<
2242 1.1 jmcneill TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK;
2243 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2244 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2245 1.2 jmcneill reg >>= 1;
2246 1.1 jmcneill reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) &
2247 1.1 jmcneill TXQ_CFG_TD_BURST_MASK;
2248 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE);
2249 1.1 jmcneill
2250 1.1 jmcneill /* Configure Rx free descriptor pre-fetching. */
2251 1.1 jmcneill CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH,
2252 1.1 jmcneill ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) &
2253 1.1 jmcneill RX_RD_FREE_THRESH_HI_MASK) |
2254 1.1 jmcneill ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) &
2255 1.1 jmcneill RX_RD_FREE_THRESH_LO_MASK));
2256 1.1 jmcneill
2257 1.1 jmcneill /*
2258 1.1 jmcneill * Configure flow control parameters.
2259 1.1 jmcneill * XON : 80% of Rx FIFO
2260 1.1 jmcneill * XOFF : 30% of Rx FIFO
2261 1.1 jmcneill */
2262 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8131 ||
2263 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8132) {
2264 1.2 jmcneill reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN);
2265 1.2 jmcneill rxf_hi = (reg * 8) / 10;
2266 1.2 jmcneill rxf_lo = (reg * 3) / 10;
2267 1.2 jmcneill CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH,
2268 1.2 jmcneill ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) &
2269 1.2 jmcneill RX_FIFO_PAUSE_THRESH_LO_MASK) |
2270 1.2 jmcneill ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) &
2271 1.2 jmcneill RX_FIFO_PAUSE_THRESH_HI_MASK));
2272 1.2 jmcneill }
2273 1.2 jmcneill
2274 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B ||
2275 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2)
2276 1.2 jmcneill CSR_WRITE_4(sc, ALC_SERDES_LOCK,
2277 1.2 jmcneill CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN |
2278 1.2 jmcneill SERDES_PHY_CLK_SLOWDOWN);
2279 1.1 jmcneill
2280 1.1 jmcneill /* Disable RSS until I understand L1C/L2C's RSS logic. */
2281 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0);
2282 1.1 jmcneill CSR_WRITE_4(sc, ALC_RSS_CPU, 0);
2283 1.1 jmcneill
2284 1.1 jmcneill /* Configure RxQ. */
2285 1.1 jmcneill reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2286 1.1 jmcneill RXQ_CFG_RD_BURST_MASK;
2287 1.1 jmcneill reg |= RXQ_CFG_RSS_MODE_DIS;
2288 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_ASPM_MON) != 0)
2289 1.2 jmcneill reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M;
2290 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2291 1.1 jmcneill
2292 1.1 jmcneill /* Configure DMA parameters. */
2293 1.1 jmcneill reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI;
2294 1.1 jmcneill reg |= sc->alc_rcb;
2295 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0)
2296 1.1 jmcneill reg |= DMA_CFG_CMB_ENB;
2297 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0)
2298 1.1 jmcneill reg |= DMA_CFG_SMB_ENB;
2299 1.1 jmcneill else
2300 1.1 jmcneill reg |= DMA_CFG_SMB_DIS;
2301 1.1 jmcneill reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) <<
2302 1.1 jmcneill DMA_CFG_RD_BURST_SHIFT;
2303 1.1 jmcneill reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) <<
2304 1.1 jmcneill DMA_CFG_WR_BURST_SHIFT;
2305 1.1 jmcneill reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) &
2306 1.1 jmcneill DMA_CFG_RD_DELAY_CNT_MASK;
2307 1.1 jmcneill reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) &
2308 1.1 jmcneill DMA_CFG_WR_DELAY_CNT_MASK;
2309 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2310 1.1 jmcneill
2311 1.1 jmcneill /*
2312 1.1 jmcneill * Configure Tx/Rx MACs.
2313 1.1 jmcneill * - Auto-padding for short frames.
2314 1.1 jmcneill * - Enable CRC generation.
2315 1.1 jmcneill 	 * The actual reconfiguration of the MAC for the resolved
2316 1.1 jmcneill 	 * speed/duplex follows once link establishment is detected.
2317 1.2 jmcneill 	 * AR813x/AR815x always does checksum computation regardless
2318 1.1 jmcneill 	 * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known to
2319 1.1 jmcneill 	 * have a bug in the protocol field of the Rx return structure, so
2320 1.1 jmcneill 	 * these controllers can't handle fragmented frames. Disable
2321 1.1 jmcneill 	 * Rx checksum offloading until there is a newer controller
2322 1.1 jmcneill 	 * that has a sane implementation.
2323 1.1 jmcneill */
2324 1.1 jmcneill reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX |
2325 1.1 jmcneill ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2326 1.1 jmcneill MAC_CFG_PREAMBLE_MASK);
2327 1.2 jmcneill if (sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151 ||
2328 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8151_V2 ||
2329 1.2 jmcneill sc->alc_ident->deviceid == PCI_PRODUCT_ATTANSIC_AR8152_B2)
2330 1.2 jmcneill reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW;
2331 1.1 jmcneill if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0)
2332 1.1 jmcneill reg |= MAC_CFG_SPEED_10_100;
2333 1.1 jmcneill else
2334 1.1 jmcneill reg |= MAC_CFG_SPEED_1000;
2335 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2336 1.1 jmcneill
2337 1.1 jmcneill /* Set up the receive filter. */
2338 1.1 jmcneill alc_iff(sc);
2339 1.1 jmcneill alc_rxvlan(sc);
2340 1.1 jmcneill
2341 1.1 jmcneill /* Acknowledge all pending interrupts and clear it. */
2342 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS);
2343 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2344 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0);
2345 1.1 jmcneill
2346 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
2347 1.1 jmcneill /* Switch to the current media. */
2348 1.1 jmcneill mii = &sc->sc_miibus;
2349 1.1 jmcneill mii_mediachg(mii);
2350 1.1 jmcneill
2351 1.1 jmcneill callout_schedule(&sc->sc_tick_ch, hz);
2352 1.1 jmcneill
2353 1.1 jmcneill ifp->if_flags |= IFF_RUNNING;
2354 1.1 jmcneill ifp->if_flags &= ~IFF_OACTIVE;
2355 1.1 jmcneill
2356 1.1 jmcneill return (0);
2357 1.1 jmcneill }
2358 1.1 jmcneill
2359 1.1 jmcneill static void
2360 1.1 jmcneill alc_stop(struct ifnet *ifp, int disable)
2361 1.1 jmcneill {
2362 1.1 jmcneill struct alc_softc *sc = ifp->if_softc;
2363 1.1 jmcneill struct alc_txdesc *txd;
2364 1.1 jmcneill struct alc_rxdesc *rxd;
2365 1.1 jmcneill uint32_t reg;
2366 1.1 jmcneill int i;
2367 1.1 jmcneill
2368 1.1 jmcneill callout_stop(&sc->sc_tick_ch);
2369 1.1 jmcneill
2370 1.1 jmcneill /*
2371 1.1 jmcneill * Mark the interface down and cancel the watchdog timer.
2372 1.1 jmcneill */
2373 1.1 jmcneill ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2374 1.1 jmcneill ifp->if_timer = 0;
2375 1.1 jmcneill
2376 1.1 jmcneill sc->alc_flags &= ~ALC_FLAG_LINK;
2377 1.1 jmcneill
2378 1.1 jmcneill alc_stats_update(sc);
2379 1.1 jmcneill
2380 1.1 jmcneill mii_down(&sc->sc_miibus);
2381 1.1 jmcneill
2382 1.1 jmcneill /* Disable interrupts. */
2383 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_MASK, 0);
2384 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2385 1.1 jmcneill alc_stop_queue(sc);
2386 1.1 jmcneill
2387 1.1 jmcneill /* Disable DMA. */
2388 1.1 jmcneill reg = CSR_READ_4(sc, ALC_DMA_CFG);
2389 1.1 jmcneill reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB);
2390 1.1 jmcneill reg |= DMA_CFG_SMB_DIS;
2391 1.1 jmcneill CSR_WRITE_4(sc, ALC_DMA_CFG, reg);
2392 1.1 jmcneill DELAY(1000);
2393 1.1 jmcneill
2394 1.1 jmcneill /* Stop Rx/Tx MACs. */
2395 1.1 jmcneill alc_stop_mac(sc);
2396 1.1 jmcneill
2397 1.1 jmcneill 	/* Disable interrupts which might be touched in the interrupt handler. */
2398 1.1 jmcneill CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF);
2399 1.1 jmcneill
2400 1.1 jmcneill /* Reclaim Rx buffers that have been processed. */
2401 1.1 jmcneill if (sc->alc_cdata.alc_rxhead != NULL)
2402 1.1 jmcneill m_freem(sc->alc_cdata.alc_rxhead);
2403 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2404 1.1 jmcneill /*
2405 1.1 jmcneill * Free Tx/Rx mbufs still in the queues.
2406 1.1 jmcneill */
2407 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
2408 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
2409 1.1 jmcneill if (rxd->rx_m != NULL) {
2410 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2411 1.1 jmcneill m_freem(rxd->rx_m);
2412 1.1 jmcneill rxd->rx_m = NULL;
2413 1.1 jmcneill }
2414 1.1 jmcneill }
2415 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2416 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2417 1.1 jmcneill if (txd->tx_m != NULL) {
2418 1.1 jmcneill bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
2419 1.1 jmcneill m_freem(txd->tx_m);
2420 1.1 jmcneill txd->tx_m = NULL;
2421 1.1 jmcneill }
2422 1.1 jmcneill }
2423 1.1 jmcneill }
2424 1.1 jmcneill
2425 1.1 jmcneill static void
2426 1.1 jmcneill alc_stop_mac(struct alc_softc *sc)
2427 1.1 jmcneill {
2428 1.1 jmcneill uint32_t reg;
2429 1.1 jmcneill int i;
2430 1.1 jmcneill
2431 1.1 jmcneill /* Disable Rx/Tx MAC. */
2432 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2433 1.1 jmcneill if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) {
2434 1.2 jmcneill reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2435 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2436 1.1 jmcneill }
2437 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2438 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2439 1.1 jmcneill if (reg == 0)
2440 1.1 jmcneill break;
2441 1.1 jmcneill DELAY(10);
2442 1.1 jmcneill }
2443 1.1 jmcneill if (i == 0)
2444 1.1 jmcneill printf("%s: could not disable Rx/Tx MAC(0x%08x)!\n",
2445 1.1 jmcneill device_xname(sc->sc_dev), reg);
2446 1.1 jmcneill }
2447 1.1 jmcneill
2448 1.1 jmcneill static void
2449 1.1 jmcneill alc_start_queue(struct alc_softc *sc)
2450 1.1 jmcneill {
2451 1.1 jmcneill uint32_t qcfg[] = {
2452 1.1 jmcneill 0,
2453 1.1 jmcneill RXQ_CFG_QUEUE0_ENB,
2454 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB,
2455 1.1 jmcneill RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB,
2456 1.1 jmcneill RXQ_CFG_ENB
2457 1.1 jmcneill };
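	/*
	 * Only Rx queue 0 is used, so qcfg[1] is always the entry
	 * applied below.
	 */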
2458 1.1 jmcneill uint32_t cfg;
2459 1.1 jmcneill
2460 1.1 jmcneill /* Enable RxQ. */
2461 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_RXQ_CFG);
2462 1.1 jmcneill cfg &= ~RXQ_CFG_ENB;
2463 1.1 jmcneill cfg |= qcfg[1];
2464 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg);
2465 1.1 jmcneill /* Enable TxQ. */
2466 1.1 jmcneill cfg = CSR_READ_4(sc, ALC_TXQ_CFG);
2467 1.1 jmcneill cfg |= TXQ_CFG_ENB;
2468 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg);
2469 1.1 jmcneill }
2470 1.1 jmcneill
2471 1.1 jmcneill static void
2472 1.1 jmcneill alc_stop_queue(struct alc_softc *sc)
2473 1.1 jmcneill {
2474 1.1 jmcneill uint32_t reg;
2475 1.1 jmcneill int i;
2476 1.1 jmcneill
2477 1.1 jmcneill /* Disable RxQ. */
2478 1.1 jmcneill reg = CSR_READ_4(sc, ALC_RXQ_CFG);
2479 1.1 jmcneill if ((reg & RXQ_CFG_ENB) != 0) {
2480 1.1 jmcneill reg &= ~RXQ_CFG_ENB;
2481 1.1 jmcneill CSR_WRITE_4(sc, ALC_RXQ_CFG, reg);
2482 1.1 jmcneill }
2483 1.1 jmcneill /* Disable TxQ. */
2484 1.1 jmcneill reg = CSR_READ_4(sc, ALC_TXQ_CFG);
2485 1.2 jmcneill if ((reg & TXQ_CFG_ENB) != 0) {
2486 1.1 jmcneill reg &= ~TXQ_CFG_ENB;
2487 1.1 jmcneill CSR_WRITE_4(sc, ALC_TXQ_CFG, reg);
2488 1.1 jmcneill }
2489 1.1 jmcneill for (i = ALC_TIMEOUT; i > 0; i--) {
2490 1.1 jmcneill reg = CSR_READ_4(sc, ALC_IDLE_STATUS);
2491 1.1 jmcneill if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0)
2492 1.1 jmcneill break;
2493 1.1 jmcneill DELAY(10);
2494 1.1 jmcneill }
2495 1.1 jmcneill if (i == 0)
2496 1.1 jmcneill printf("%s: could not disable RxQ/TxQ (0x%08x)!\n",
2497 1.1 jmcneill device_xname(sc->sc_dev), reg);
2498 1.1 jmcneill }
2499 1.1 jmcneill
2500 1.1 jmcneill static void
2501 1.1 jmcneill alc_init_tx_ring(struct alc_softc *sc)
2502 1.1 jmcneill {
2503 1.1 jmcneill struct alc_ring_data *rd;
2504 1.1 jmcneill struct alc_txdesc *txd;
2505 1.1 jmcneill int i;
2506 1.1 jmcneill
2507 1.1 jmcneill sc->alc_cdata.alc_tx_prod = 0;
2508 1.1 jmcneill sc->alc_cdata.alc_tx_cons = 0;
2509 1.1 jmcneill sc->alc_cdata.alc_tx_cnt = 0;
2510 1.1 jmcneill
2511 1.1 jmcneill rd = &sc->alc_rdata;
2512 1.1 jmcneill memset(rd->alc_tx_ring, 0, ALC_TX_RING_SZ);
2513 1.1 jmcneill for (i = 0; i < ALC_TX_RING_CNT; i++) {
2514 1.1 jmcneill txd = &sc->alc_cdata.alc_txdesc[i];
2515 1.1 jmcneill txd->tx_m = NULL;
2516 1.1 jmcneill }
2517 1.1 jmcneill
2518 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_tx_ring_map, 0,
2519 1.1 jmcneill sc->alc_cdata.alc_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2520 1.1 jmcneill }
2521 1.1 jmcneill
2522 1.1 jmcneill static int
2523 1.7 mrg alc_init_rx_ring(struct alc_softc *sc, bool init)
2524 1.1 jmcneill {
2525 1.1 jmcneill struct alc_ring_data *rd;
2526 1.1 jmcneill struct alc_rxdesc *rxd;
2527 1.1 jmcneill int i;
2528 1.1 jmcneill
2529 1.1 jmcneill sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1;
2530 1.1 jmcneill rd = &sc->alc_rdata;
2531 1.1 jmcneill memset(rd->alc_rx_ring, 0, ALC_RX_RING_SZ);
2532 1.1 jmcneill for (i = 0; i < ALC_RX_RING_CNT; i++) {
2533 1.1 jmcneill rxd = &sc->alc_cdata.alc_rxdesc[i];
2534 1.1 jmcneill rxd->rx_m = NULL;
2535 1.1 jmcneill rxd->rx_desc = &rd->alc_rx_ring[i];
2536 1.7 mrg if (alc_newbuf(sc, rxd, init) != 0)
2537 1.1 jmcneill return (ENOBUFS);
2538 1.1 jmcneill }
2539 1.1 jmcneill
2540 1.1 jmcneill /*
2541 1.1 jmcneill * Since the controller does not update Rx descriptors, the driver
2542 1.1 jmcneill * does not have to read them back, so BUS_DMASYNC_PREWRITE is
2543 1.1 jmcneill * enough to ensure coherence.
2544 1.1 jmcneill */
2545 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rx_ring_map, 0,
2546 1.1 jmcneill sc->alc_cdata.alc_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2547 1.1 jmcneill /* Let controller know availability of new Rx buffers. */
2548 1.1 jmcneill CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons);
2549 1.1 jmcneill
2550 1.1 jmcneill return (0);
2551 1.1 jmcneill }
2552 1.1 jmcneill
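/*
 * alc_init_rr_ring: reset the Rx return ring consumer index and the
 * driver's Rx mbuf chain state, then zero the return ring and push the
 * cleared contents out with a PREWRITE sync.
 */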
2553 1.1 jmcneill static void
2554 1.1 jmcneill alc_init_rr_ring(struct alc_softc *sc)
2555 1.1 jmcneill {
2556 1.1 jmcneill struct alc_ring_data *rd;
2557 1.1 jmcneill
2558 1.1 jmcneill sc->alc_cdata.alc_rr_cons = 0;
2559 1.1 jmcneill ALC_RXCHAIN_RESET(sc);
2560 1.1 jmcneill
2561 1.1 jmcneill rd = &sc->alc_rdata;
2562 1.1 jmcneill memset(rd->alc_rr_ring, 0, ALC_RR_RING_SZ);
2563 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_rr_ring_map, 0,
2564 1.1 jmcneill sc->alc_cdata.alc_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2565 1.1 jmcneill }
2566 1.1 jmcneill
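/*
 * alc_init_cmb: zero the coalescing message block and sync it so the
 * device sees the cleared contents.
 */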
2567 1.1 jmcneill static void
2568 1.1 jmcneill alc_init_cmb(struct alc_softc *sc)
2569 1.1 jmcneill {
2570 1.1 jmcneill struct alc_ring_data *rd;
2571 1.1 jmcneill
2572 1.1 jmcneill rd = &sc->alc_rdata;
2573 1.1 jmcneill memset(rd->alc_cmb, 0, ALC_CMB_SZ);
2574 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_cmb_map, 0,
2575 1.1 jmcneill sc->alc_cdata.alc_cmb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2576 1.1 jmcneill }
2577 1.1 jmcneill
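/*
 * alc_init_smb: zero the statistics message block that the hardware
 * fills with MAC counters, and sync the cleared contents.
 */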
2578 1.1 jmcneill static void
2579 1.1 jmcneill alc_init_smb(struct alc_softc *sc)
2580 1.1 jmcneill {
2581 1.1 jmcneill struct alc_ring_data *rd;
2582 1.1 jmcneill
2583 1.1 jmcneill rd = &sc->alc_rdata;
2584 1.1 jmcneill memset(rd->alc_smb, 0, ALC_SMB_SZ);
2585 1.1 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->alc_cdata.alc_smb_map, 0,
2586 1.1 jmcneill sc->alc_cdata.alc_smb_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2587 1.1 jmcneill }
2588 1.1 jmcneill
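/*
 * alc_rxvlan: enable or disable hardware VLAN tag stripping on receive
 * according to the current ETHERCAP_VLAN_HWTAGGING setting.
 */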
2589 1.1 jmcneill static void
2590 1.1 jmcneill alc_rxvlan(struct alc_softc *sc)
2591 1.1 jmcneill {
2592 1.1 jmcneill uint32_t reg;
2593 1.1 jmcneill
2594 1.1 jmcneill reg = CSR_READ_4(sc, ALC_MAC_CFG);
2595 1.3 sborrill if (sc->sc_ec.ec_capenable & ETHERCAP_VLAN_HWTAGGING)
2596 1.1 jmcneill reg |= MAC_CFG_VLAN_TAG_STRIP;
2597 1.1 jmcneill else
2598 1.1 jmcneill reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2599 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
2600 1.1 jmcneill }
2601 1.1 jmcneill
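/*
 * alc_iff: program the Rx filter.  Broadcast frames are always accepted.
 * Promiscuous mode sets MAC_CFG_PROMISC, and any joined multicast group
 * currently forces ALLMULTI; otherwise the two 32-bit multicast hash
 * registers are rebuilt from the multicast list using the high bits of
 * the big-endian CRC of each address.
 */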
2602 1.1 jmcneill static void
2603 1.1 jmcneill alc_iff(struct alc_softc *sc)
2604 1.1 jmcneill {
2605 1.1 jmcneill struct ethercom *ec = &sc->sc_ec;
2606 1.1 jmcneill struct ifnet *ifp = &ec->ec_if;
2607 1.1 jmcneill struct ether_multi *enm;
2608 1.1 jmcneill struct ether_multistep step;
2609 1.1 jmcneill uint32_t crc;
2610 1.1 jmcneill uint32_t mchash[2];
2611 1.1 jmcneill uint32_t rxcfg;
2612 1.1 jmcneill
2613 1.1 jmcneill rxcfg = CSR_READ_4(sc, ALC_MAC_CFG);
2614 1.1 jmcneill rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2615 1.1 jmcneill ifp->if_flags &= ~IFF_ALLMULTI;
2616 1.1 jmcneill
2617 1.1 jmcneill /*
2618 1.1 jmcneill * Always accept broadcast frames.
2619 1.1 jmcneill */
2620 1.1 jmcneill rxcfg |= MAC_CFG_BCAST;
2621 1.1 jmcneill
2622 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2623 1.1 jmcneill ifp->if_flags |= IFF_ALLMULTI;
2624 1.1 jmcneill if (ifp->if_flags & IFF_PROMISC)
2625 1.1 jmcneill rxcfg |= MAC_CFG_PROMISC;
2626 1.1 jmcneill else
2627 1.1 jmcneill rxcfg |= MAC_CFG_ALLMULTI;
2628 1.1 jmcneill mchash[0] = mchash[1] = 0xFFFFFFFF;
2629 1.1 jmcneill } else {
2630 1.1 jmcneill /* Program new filter. */
2631 1.1 jmcneill memset(mchash, 0, sizeof(mchash));
2632 1.1 jmcneill
2633 1.1 jmcneill ETHER_FIRST_MULTI(step, ec, enm);
2634 1.1 jmcneill while (enm != NULL) {
2635 1.1 jmcneill crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
2636 1.1 jmcneill mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2637 1.1 jmcneill ETHER_NEXT_MULTI(step, enm);
2638 1.1 jmcneill }
2639 1.1 jmcneill }
2640 1.1 jmcneill
2641 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR0, mchash[0]);
2642 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAR1, mchash[1]);
2643 1.1 jmcneill CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg);
2644 1.1 jmcneill }
2645 1.1 jmcneill
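/* Loadable module glue: register if_alc as a driver that depends on "pci". */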
2646 1.5 jmcneill MODULE(MODULE_CLASS_DRIVER, if_alc, "pci");
2647 1.1 jmcneill
2648 1.1 jmcneill #ifdef _MODULE
2649 1.1 jmcneill #include "ioconf.c"
2650 1.1 jmcneill #endif
2651 1.1 jmcneill
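/*
 * if_alc_modcmd: handle module load/unload requests.  When built as a
 * loadable module the driver is attached or detached through the
 * autoconfiguration tables generated in ioconf.c; in a statically
 * configured kernel these calls are compiled out and the command just
 * returns success.
 */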
2652 1.1 jmcneill static int
2653 1.1 jmcneill if_alc_modcmd(modcmd_t cmd, void *opaque)
2654 1.1 jmcneill {
2655 1.1 jmcneill int error = 0;
2656 1.1 jmcneill
2657 1.1 jmcneill switch (cmd) {
2658 1.1 jmcneill case MODULE_CMD_INIT:
2659 1.1 jmcneill #ifdef _MODULE
2660 1.1 jmcneill error = config_init_component(cfdriver_ioconf_if_alc,
2661 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2662 1.1 jmcneill #endif
2663 1.1 jmcneill return error;
2664 1.1 jmcneill case MODULE_CMD_FINI:
2665 1.1 jmcneill #ifdef _MODULE
2666 1.1 jmcneill error = config_fini_component(cfdriver_ioconf_if_alc,
2667 1.1 jmcneill cfattach_ioconf_if_alc, cfdata_ioconf_if_alc);
2668 1.1 jmcneill #endif
2669 1.1 jmcneill return error;
2670 1.1 jmcneill default:
2671 1.1 jmcneill return ENOTTY;
2672 1.1 jmcneill }
2673 1.1 jmcneill }
2674