/* $NetBSD: if_age.c,v 1.38.2.2 2010/04/21 00:27:40 matt Exp $ */
/* $OpenBSD: if_age.c,v 1.1 2009/01/16 05:00:34 kevlo Exp $ */

/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari (at) FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_age.c,v 1.38.2.2 2010/04/21 00:27:40 matt Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <net/if_types.h>
#include <net/if_vlanvar.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/rnd.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_agereg.h>

static int age_match(device_t, cfdata_t, void *);
static void age_attach(device_t, device_t, void *);
static int age_detach(device_t, int);

static bool age_resume(device_t PMF_FN_PROTO);

static int age_miibus_readreg(device_t, int, int);
static void age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);

static int age_init(struct ifnet *);
static int age_ioctl(struct ifnet *, u_long, void *);
static void age_start(struct ifnet *);
static void age_watchdog(struct ifnet *);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);

static int age_intr(void *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static void age_get_macaddr(struct age_softc *, uint8_t[]);
static void age_phy_reset(struct age_softc *);

static int age_encap(struct age_softc *, struct mbuf **);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
static int age_newbuf(struct age_softc *, struct age_rxdesc *, int);
static void age_mac_config(struct age_softc *);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static void age_rxintr(struct age_softc *, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_stop(struct ifnet *, int);
static void age_stats_update(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_rxvlan(struct age_softc *sc);
static void age_rxfilter(struct age_softc *);

CFATTACH_DECL_NEW(age, sizeof(struct age_softc),
    age_match, age_attach, age_detach, NULL);

int agedebug = 0;
#define DPRINTF(x) do { if (agedebug) printf x; } while (0)

#define ETHER_ALIGN 2
#define AGE_CSUM_FEATURES (M_CSUM_TCPv4 | M_CSUM_UDPv4)

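/*
 * Match the Attansic L1 gigabit Ethernet controller by PCI vendor
 * and product ID.
 */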
static int
age_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATTANSIC &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATTANSIC_ETHERNET_GIGA);
}

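/*
 * Attach the adapter: map the device registers, establish the
 * interrupt, reset the PHY and controller, read the station address,
 * allocate DMA resources, and register the interface, MII bus and
 * power handler.
 */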
static void
age_attach(device_t parent, device_t self, void *aux)
{
	struct age_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t memtype;
	int error = 0;

	aprint_naive("\n");
	aprint_normal(": Attansic/Atheros L1 Gigabit Ethernet\n");

	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/*
	 * Allocate IO memory
	 */
	memtype = pci_mapreg_type(sc->sc_pct, sc->sc_pcitag, AGE_PCIR_BAR);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT_1M:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		break;
	default:
		aprint_error_dev(self, "invalid base address register\n");
		break;
	}

	if (pci_mapreg_map(pa, AGE_PCIR_BAR, memtype, 0, &sc->sc_mem_bt,
	    &sc->sc_mem_bh, NULL, &sc->sc_mem_size) != 0) {
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(sc->sc_pct, ih);
	sc->sc_irq_handle = pci_intr_establish(sc->sc_pct, ih, IPL_NET,
	    age_intr, sc);
	if (sc->sc_irq_handle == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "%s\n", intrstr);

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = PCI_REVISION(pa->pa_class);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;

	aprint_debug_dev(self, "PCI device revision : 0x%04x\n", sc->age_rev);
	aprint_debug_dev(self, "Chip id/revision : 0x%04x\n", sc->age_chip_rev);

	if (agedebug) {
		aprint_debug_dev(self, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	}

	/* Set max allowable DMA size. */
	sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
	sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

	/* Allocate DMA resources. */
	error = age_dma_alloc(sc);
	if (error)
		goto fail;

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, age_tick, sc);

	/* Load station address. */
	age_get_macaddr(sc, sc->sc_enaddr);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = age_init;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_stop = age_stop;
	ifp->if_watchdog = age_watchdog;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, AGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU;

#ifdef AGE_CHECKSUM
	ifp->if_capabilities |= IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
#endif

#if NVLAN > 0
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
#endif

	/* Set up MII bus. */
	sc->sc_miibus.mii_ifp = ifp;
	sc->sc_miibus.mii_readreg = age_miibus_readreg;
	sc->sc_miibus.mii_writereg = age_miibus_writereg;
	sc->sc_miibus.mii_statchg = age_miibus_statchg;

	sc->sc_ec.ec_mii = &sc->sc_miibus;
	ifmedia_init(&sc->sc_miibus.mii_media, 0, age_mediachange,
	    age_mediastatus);
	mii_attach(self, &sc->sc_miibus, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_miibus.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_miibus.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (!pmf_device_register(self, NULL, age_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;

fail:
	age_dma_free(sc);
	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
}

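/*
 * Detach the adapter: stop the interface and release the MII bus,
 * interrupt, DMA resources and mapped register space.
 */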
static int
age_detach(device_t self, int flags)
{
	struct age_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;

	pmf_device_deregister(self);
	s = splnet();
	age_stop(ifp, 0);
	splx(s);

	mii_detach(&sc->sc_miibus, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_miibus.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
	age_dma_free(sc);

	if (sc->sc_irq_handle != NULL) {
		pci_intr_disestablish(sc->sc_pct, sc->sc_irq_handle);
		sc->sc_irq_handle = NULL;
	}
	if (sc->sc_mem_size) {
		bus_space_unmap(sc->sc_mem_bt, sc->sc_mem_bh, sc->sc_mem_size);
		sc->sc_mem_size = 0;
	}
	return 0;
}

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return 0;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy read timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		return 0;
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
static void
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc = device_private(dev);
	uint32_t v;
	int i;

	if (phy != sc->age_phyaddr)
		return;

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));

	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		printf("%s: phy write timeout: phy %d, reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	}
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc = device_private(dev);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	mii = &sc->sc_miibus;

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		uint32_t reg;

		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_miibus;
	int error;

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return error;
}

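/*
 * Interrupt handler.  The Tx/Rx producer and consumer indices are
 * taken from the coalescing message block (CMB) and the Tx, Rx and
 * statistics handlers are dispatched from the latched status.
 */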
static int
age_intr(void *arg)
{
	struct age_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct cmb *cmb;
	uint32_t status;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return 0;

	cmb = sc->age_rdata.age_cmb_block;
	if (cmb == NULL) {
		/*
		 * Happens when bringing up the interface
		 * without a carrier.  Ack the interrupt.
		 */
		CSR_WRITE_4(sc, AGE_INTR_STATUS, status);
		return 0;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
	status = le32toh(cmb->intr_status);
	if ((status & AGE_INTRS) == 0)
		goto back;

	sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
	    TPD_CONS_SHIFT;
	sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
	    RRD_PROD_SHIFT;

	/* Let hardware know CMB was served. */
	cmb->intr_status = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ifp->if_flags & IFF_RUNNING) {
		if (status & INTR_CMB_RX)
			age_rxintr(sc, sc->age_rr_prod);

		if (status & INTR_CMB_TX)
			age_txintr(sc, sc->age_tpd_cons);

		if (status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) {
			if (status & INTR_DMA_RD_TO_RST)
				printf("%s: DMA read error! -- resetting\n",
				    device_xname(sc->sc_dev));
			if (status & INTR_DMA_WR_TO_RST)
				printf("%s: DMA write error! -- resetting\n",
				    device_xname(sc->sc_dev));
			age_init(ifp);
		}

		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			age_start(ifp);

		if (status & INTR_SMB)
			age_stats_update(sc);
	}

	/* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
	    sc->age_cdata.age_cmb_block_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);

back:
	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);

	return 1;
}

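/*
 * Read the station address from the PAR0/PAR1 registers, asking the
 * TWSI controller to reload it from the EEPROM first when a PCI VPD
 * capability is present.
 */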
static void
age_get_macaddr(struct age_softc *sc, uint8_t eaddr[])
{
	uint32_t ea[2], reg;
	int i, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	if (pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_VPD, &vpdc, NULL)) {
		/*
		 * PCI VPD capability found, let TWSI reload EEPROM.
		 * This will set the Ethernet address of the controller.
		 */
		CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
		    TWSI_CTRL_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
			if ((reg & TWSI_CTRL_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			printf("%s: reloading EEPROM timeout!\n",
			    device_xname(sc->sc_dev));
	} else {
		if (agedebug)
			printf("%s: PCI VPD capability not found!\n",
			    device_xname(sc->sc_dev));
	}

	ea[0] = CSR_READ_4(sc, AGE_PAR0);
	ea[1] = CSR_READ_4(sc, AGE_PAR1);

	eaddr[0] = (ea[1] >> 8) & 0xFF;
	eaddr[1] = (ea[1] >> 0) & 0xFF;
	eaddr[2] = (ea[0] >> 24) & 0xFF;
	eaddr[3] = (ea[0] >> 16) & 0xFF;
	eaddr[4] = (ea[0] >> 8) & 0xFF;
	eaddr[5] = (ea[0] >> 0) & 0xFF;
}

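/*
 * Reset the PHY and, when the cable diagnostic reports all pairs open
 * (no link partner), apply the vendor power-saving workaround.
 */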
static void
age_phy_reset(struct age_softc *sc)
{
	uint16_t reg, pn;
	int i, linkup;

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(2000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(2000);

#define ATPHY_DBG_ADDR 0x1D
#define ATPHY_DBG_DATA 0x1E
#define ATPHY_CDTC 0x16
#define PHY_CDTC_ENB 0x0001
#define PHY_CDTC_POFF 8
#define ATPHY_CDTS 0x1C
#define PHY_CDTS_STAT_OK 0x0000
#define PHY_CDTS_STAT_SHORT 0x0100
#define PHY_CDTS_STAT_OPEN 0x0200
#define PHY_CDTS_STAT_INVAL 0x0300
#define PHY_CDTS_STAT_MASK 0x0300

	/* Check power saving mode. Magic from Linux. */
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
	for (linkup = 0, pn = 0; pn < 4; pn++) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, ATPHY_CDTC,
		    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
		for (i = 200; i > 0; i--) {
			DELAY(1000);
			reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
			    ATPHY_CDTC);
			if ((reg & PHY_CDTC_ENB) == 0)
				break;
		}
		DELAY(1000);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_CDTS);
		if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
			linkup++;
			break;
		}
	}
	age_miibus_writereg(sc->sc_dev, sc->age_phyaddr, MII_BMCR,
	    BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	if (linkup == 0) {
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x124E);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 1);
		reg = age_miibus_readreg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, reg | 0x03);
		/* XXX */
		DELAY(1500 * 1000);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_ADDR, 0);
		age_miibus_writereg(sc->sc_dev, sc->age_phyaddr,
		    ATPHY_DBG_DATA, 0x024E);
	}

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

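/*
 * Allocate and map DMA memory for the Tx, Rx and Rx return rings and
 * the CMB/SMB blocks, and create the DMA maps used for Tx and Rx
 * buffers.
 */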
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int nsegs, error, i;

	/*
	 * Create DMA resources for the Tx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_TX_RING_SZ, 1,
	    AGE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_tx_ring_map);
	if (error) {
		sc->age_cdata.age_tx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for the Tx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_TX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_tx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_tx_ring_seg,
	    nsegs, AGE_TX_RING_SZ, (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_tx_ring, 0, AGE_TX_RING_SZ);

	/* Load the DMA map for the Tx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_tx_ring_map,
	    sc->age_rdata.age_tx_ring, AGE_TX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Tx ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_tx_ring_paddr =
	    sc->age_cdata.age_tx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RX_RING_SZ, 1,
	    AGE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_ring_map);
	if (error) {
		sc->age_cdata.age_rx_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for the Rx ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RX_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rx_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rx_ring_seg,
	    nsegs, AGE_RX_RING_SZ, (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rx_ring, 0, AGE_RX_RING_SZ);

	/* Load the DMA map for the Rx ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rx_ring_map,
	    sc->age_rdata.age_rx_ring, AGE_RX_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx ring, "
		    "error = %i.\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rx_ring_paddr =
	    sc->age_cdata.age_rx_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the Rx return ring.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_RR_RING_SZ, 1,
	    AGE_RR_RING_SZ, 0, BUS_DMA_NOWAIT, &sc->age_cdata.age_rr_ring_map);
	if (error) {
		sc->age_cdata.age_rr_ring_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for the Rx return ring. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_RR_RING_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_rr_ring_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for Rx "
		    "return ring, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_rr_ring_seg,
	    nsegs, AGE_RR_RING_SZ, (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_rr_ring, 0, AGE_RR_RING_SZ);

	/* Load the DMA map for the Rx return ring. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_rr_ring_map,
	    sc->age_rdata.age_rr_ring, AGE_RR_RING_SZ, NULL, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for Rx return ring, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
		return error;
	}

	sc->age_rdata.age_rr_ring_paddr =
	    sc->age_cdata.age_rr_ring_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the CMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_CMB_BLOCK_SZ, 1,
	    AGE_CMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error) {
		sc->age_cdata.age_cmb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for the CMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_CMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_cmb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "CMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_cmb_block_seg,
	    nsegs, AGE_CMB_BLOCK_SZ, (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_cmb_block, 0, AGE_CMB_BLOCK_SZ);

	/* Load the DMA map for the CMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_cmb_block_map,
	    sc->age_rdata.age_cmb_block, AGE_CMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for CMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_cmb_block_paddr =
	    sc->age_cdata.age_cmb_block_map->dm_segs[0].ds_addr;

	/*
	 * Create DMA resources for the SMB block.
	 */
	error = bus_dmamap_create(sc->sc_dmat, AGE_SMB_BLOCK_SZ, 1,
	    AGE_SMB_BLOCK_SZ, 0, BUS_DMA_NOWAIT,
	    &sc->age_cdata.age_smb_block_map);
	if (error) {
		sc->age_cdata.age_smb_block_map = NULL;
		return ENOBUFS;
	}

	/* Allocate DMA'able memory for the SMB block. */
	error = bus_dmamem_alloc(sc->sc_dmat, AGE_SMB_BLOCK_SZ,
	    ETHER_ALIGN, 0, &sc->age_rdata.age_smb_block_seg, 1,
	    &nsegs, BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not allocate DMA'able memory for "
		    "SMB block, error = %i\n", device_xname(sc->sc_dev), error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->age_rdata.age_smb_block_seg,
	    nsegs, AGE_SMB_BLOCK_SZ, (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_NOWAIT);
	if (error)
		return ENOBUFS;

	memset(sc->age_rdata.age_smb_block, 0, AGE_SMB_BLOCK_SZ);

	/* Load the DMA map for the SMB block. */
	error = bus_dmamap_load(sc->sc_dmat, sc->age_cdata.age_smb_block_map,
	    sc->age_rdata.age_smb_block, AGE_SMB_BLOCK_SZ, NULL,
	    BUS_DMA_WAITOK);
	if (error) {
		printf("%s: could not load DMA'able memory for SMB block, "
		    "error = %i\n", device_xname(sc->sc_dev), error);
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
		return error;
	}

	sc->age_rdata.age_smb_block_paddr =
	    sc->age_cdata.age_smb_block_map->dm_segs[0].ds_addr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, AGE_TSO_MAXSIZE,
		    AGE_MAXTXSEGS, AGE_TSO_MAXSEGSIZE, 0, BUS_DMA_NOWAIT,
		    &txd->tx_dmamap);
		if (error) {
			txd->tx_dmamap = NULL;
			printf("%s: could not create Tx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT, &sc->age_cdata.age_rx_sparemap);
	if (error) {
		sc->age_cdata.age_rx_sparemap = NULL;
		printf("%s: could not create spare Rx dmamap, error = %i.\n",
		    device_xname(sc->sc_dev), error);
		return error;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &rxd->rx_dmamap);
		if (error) {
			rxd->rx_dmamap = NULL;
			printf("%s: could not create Rx dmamap, error = %i.\n",
			    device_xname(sc->sc_dev), error);
			return error;
		}
	}

	return 0;
}

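/* Tear down all DMA maps and memory created by age_dma_alloc(). */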
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		if (txd->tx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, txd->tx_dmamap);
			txd->tx_dmamap = NULL;
		}
	}
	/* Rx buffers */
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		if (rxd->rx_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, rxd->rx_dmamap);
			rxd->rx_dmamap = NULL;
		}
	}
	if (sc->age_cdata.age_rx_sparemap != NULL) {
		bus_dmamap_destroy(sc->sc_dmat, sc->age_cdata.age_rx_sparemap);
		sc->age_cdata.age_rx_sparemap = NULL;
	}

	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_tx_ring_map);
	if (sc->age_cdata.age_tx_ring_map != NULL &&
	    sc->age_rdata.age_tx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_tx_ring_seg, 1);
	sc->age_rdata.age_tx_ring = NULL;
	sc->age_cdata.age_tx_ring_map = NULL;

	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rx_ring_map);
	if (sc->age_cdata.age_rx_ring_map != NULL &&
	    sc->age_rdata.age_rx_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rx_ring_seg, 1);
	sc->age_rdata.age_rx_ring = NULL;
	sc->age_cdata.age_rx_ring_map = NULL;

	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_rr_ring_map);
	if (sc->age_cdata.age_rr_ring_map != NULL &&
	    sc->age_rdata.age_rr_ring != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_rr_ring_seg, 1);
	sc->age_rdata.age_rr_ring = NULL;
	sc->age_cdata.age_rr_ring_map = NULL;

	/* CMB block */
	if (sc->age_cdata.age_cmb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_cmb_block_map);
	if (sc->age_cdata.age_cmb_block_map != NULL &&
	    sc->age_rdata.age_cmb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_cmb_block_seg, 1);
	sc->age_rdata.age_cmb_block = NULL;
	sc->age_cdata.age_cmb_block_map = NULL;

	/* SMB block */
	if (sc->age_cdata.age_smb_block_map != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->age_cdata.age_smb_block_map);
	if (sc->age_cdata.age_smb_block_map != NULL &&
	    sc->age_rdata.age_smb_block != NULL)
		bus_dmamem_free(sc->sc_dmat,
		    &sc->age_rdata.age_smb_block_seg, 1);
	sc->age_rdata.age_smb_block = NULL;
	sc->age_cdata.age_smb_block_map = NULL;
}

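/*
 * Start output: dequeue packets from the send queue, encapsulate them
 * into the Tx ring and kick the hardware by committing the mailbox.
 */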
static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;
	struct mbuf *m_head;
	int enq;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	enq = 0;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		enq = 1;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m_head);
#endif
	}

	if (enq) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = AGE_TX_TIMEOUT;
	}
}

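/*
 * Transmit watchdog: report the timeout and either restart output or
 * reinitialize the interface, depending on the cause.
 */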
static void
age_watchdog(struct ifnet *ifp)
{
	struct age_softc *sc = ifp->if_softc;

	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		printf("%s: watchdog timeout (missed link)\n",
		    device_xname(sc->sc_dev));
		ifp->if_oerrors++;
		age_init(ifp);
		return;
	}

	if (sc->age_cdata.age_tx_cnt == 0) {
		printf("%s: watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n", device_xname(sc->sc_dev));
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			age_start(ifp);
		return;
	}

	printf("%s: watchdog timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;
	age_init(ifp);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		age_start(ifp);
}

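/*
 * Handle interface ioctls; when ether_ioctl() returns ENETRESET,
 * reprogram the Rx filter if the interface is running.
 */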
static int
age_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct age_softc *sc = ifp->if_softc;
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			age_rxfilter(sc);
		error = 0;
	}

	splx(s);
	return error;
}

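/*
 * Program the MAC with the speed, duplex and flow-control parameters
 * resolved by the MII layer.
 */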
static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	mii = &sc->sc_miibus;

	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;

	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}

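/*
 * PMF resume handler: re-enable PCI INTx delivery if the interrupt
 * disable bit was left set across suspend/resume.
 */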
static bool
age_resume(device_t dv PMF_FN_ARGS)
{
	struct age_softc *sc = device_private(dv);
	uint16_t cmd;

	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * sets across a resume event.  From Linux.
	 */
	cmd = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if ((cmd & PCI_COMMAND_INTERRUPT_DISABLE) != 0) {
		cmd &= ~PCI_COMMAND_INTERRUPT_DISABLE;
		pci_conf_write(sc->sc_pct, sc->sc_pcitag,
		    PCI_COMMAND_STATUS_REG, cmd);
	}

	return true;
}

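/*
 * Encapsulate an mbuf chain into the Tx ring: load it for DMA, apply
 * checksum offload and VLAN tag flags, and fill one Tx descriptor per
 * DMA segment.
 */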
static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t cflags, poff, vtag;
	int error, i, nsegs, prod;
#if NVLAN > 0
	struct m_tag *mtag;
#endif

	m = *m_head;
	cflags = vtag = 0;
	poff = 0;

	prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head, BUS_DMA_NOWAIT);

	if (error == EFBIG) {
		error = 0;

		*m_head = m_pullup(*m_head, MHLEN);
		if (*m_head == NULL) {
			printf("%s: can't defrag TX mbuf\n",
			    device_xname(sc->sc_dev));
			return ENOBUFS;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, *m_head,
		    BUS_DMA_NOWAIT);

		if (error != 0) {
			printf("%s: could not load defragged TX mbuf\n",
			    device_xname(sc->sc_dev));
			m_freem(*m_head);
			*m_head = NULL;
			return error;
		}
	} else if (error) {
		printf("%s: could not load TX mbuf\n", device_xname(sc->sc_dev));
		return error;
	}

	nsegs = map->dm_nsegs;

	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return EIO;
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_TCPv4) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & M_CSUM_UDPv4) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
	}

#if NVLAN > 0
	/* Configure VLAN hardware tag insertion. */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ec, m))) {
		vtag = AGE_TX_VLAN_TAG(htons(VLAN_TAG_VALUE(mtag)));
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}
#endif

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(map->dm_segs[i].ds_addr);
		desc->len =
		    htole32(AGE_TX_BYTES(map->dm_segs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}

	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	return 0;
}

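/*
 * Tx interrupt handler: reclaim transmitted mbufs and descriptors up
 * to the hardware's TPD consumer index.
 */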
static void
age_txintr(struct age_softc *sc, int tpd_cons)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct age_txdesc *txd;
	int cons, prog;

	bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
	    sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	cons = sc->age_cdata.age_tx_cons;
	for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
		if (sc->age_cdata.age_tx_cnt <= 0)
			break;
		prog++;
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->age_cdata.age_tx_cnt--;
		txd = &sc->age_cdata.age_txdesc[cons];
1319 1.38.2.2 matt /*
1320 1.38.2.2 matt * Clearing the Tx descriptors is not required, but it
1321 1.38.2.2 matt * helps debugging in case of Tx issues.
1322 1.38.2.2 matt */
1323 1.38.2.2 matt txd->tx_desc->addr = 0;
1324 1.38.2.2 matt txd->tx_desc->len = 0;
1325 1.38.2.2 matt txd->tx_desc->flags = 0;
1326 1.38.2.2 matt
1327 1.38.2.2 matt if (txd->tx_m == NULL)
1328 1.38.2.2 matt continue;
1329 1.38.2.2 matt /* Reclaim transmitted mbufs. */
1330 1.38.2.2 matt bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1331 1.38.2.2 matt m_freem(txd->tx_m);
1332 1.38.2.2 matt txd->tx_m = NULL;
1333 1.38.2.2 matt }
1334 1.38.2.2 matt
1335 1.38.2.2 matt if (prog > 0) {
1336 1.38.2.2 matt sc->age_cdata.age_tx_cons = cons;
1337 1.38.2.2 matt
1338 1.38.2.2 matt /*
1339 1.38.2.2 matt * Unarm the watchdog timer only when there are no pending
1340 1.38.2.2 matt * Tx descriptors in the queue.
1341 1.38.2.2 matt */
1342 1.38.2.2 matt if (sc->age_cdata.age_tx_cnt == 0)
1343 1.38.2.2 matt ifp->if_timer = 0;
1344 1.38.2.2 matt
1345 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
1346 1.38.2.2 matt sc->age_cdata.age_tx_ring_map->dm_mapsize,
1347 1.38.2.2 matt BUS_DMASYNC_PREWRITE);
1348 1.38.2.2 matt }
1349 1.38.2.2 matt }
1350 1.38.2.2 matt
1351 1.38.2.2 matt /* Receive a frame. */
1352 1.38.2.2 matt static void
1353 1.38.2.2 matt age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
1354 1.38.2.2 matt {
1355 1.38.2.2 matt struct ifnet *ifp = &sc->sc_ec.ec_if;
1356 1.38.2.2 matt struct age_rxdesc *rxd;
1357 1.38.2.2 matt struct rx_desc *desc;
1358 1.38.2.2 matt struct mbuf *mp, *m;
1359 1.38.2.2 matt uint32_t status, index;
1360 1.38.2.2 matt int count, nsegs, pktlen;
1361 1.38.2.2 matt int rx_cons;
1362 1.38.2.2 matt
1363 1.38.2.2 matt status = le32toh(rxrd->flags);
1364 1.38.2.2 matt index = le32toh(rxrd->index);
1365 1.38.2.2 matt rx_cons = AGE_RX_CONS(index);
1366 1.38.2.2 matt nsegs = AGE_RX_NSEGS(index);
1367 1.38.2.2 matt
1368 1.38.2.2 matt sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
1369 1.38.2.2 matt if ((status & AGE_RRD_ERROR) != 0 &&
1370 1.38.2.2 matt (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
1371 1.38.2.2 matt AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
1372 1.38.2.2 matt /*
1373 1.38.2.2 matt * We want to pass the following frames to the upper
1374 1.38.2.2 matt * layer regardless of the error status of the Rx
1375 1.38.2.2 matt * return ring:
1376 1.38.2.2 matt *
1377 1.38.2.2 matt * o the IP/TCP/UDP checksum is bad.
1378 1.38.2.2 matt * o the frame length and the protocol-specific
1379 1.38.2.2 matt * length do not match.
1380 1.38.2.2 matt */
1381 1.38.2.2 matt sc->age_cdata.age_rx_cons += nsegs;
1382 1.38.2.2 matt sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1383 1.38.2.2 matt return;
1384 1.38.2.2 matt }
1385 1.38.2.2 matt
1386 1.38.2.2 matt pktlen = 0;
1387 1.38.2.2 matt for (count = 0; count < nsegs; count++,
1388 1.38.2.2 matt AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
1389 1.38.2.2 matt rxd = &sc->age_cdata.age_rxdesc[rx_cons];
1390 1.38.2.2 matt mp = rxd->rx_m;
1391 1.38.2.2 matt desc = rxd->rx_desc;
1392 1.38.2.2 matt /* Add a new receive buffer to the ring. */
1393 1.38.2.2 matt if (age_newbuf(sc, rxd, 0) != 0) {
1394 1.38.2.2 matt ifp->if_iqdrops++;
1395 1.38.2.2 matt /* Reuse Rx buffers. */
1396 1.38.2.2 matt if (sc->age_cdata.age_rxhead != NULL) {
1397 1.38.2.2 matt m_freem(sc->age_cdata.age_rxhead);
1398 1.38.2.2 matt AGE_RXCHAIN_RESET(sc);
1399 1.38.2.2 matt }
1400 1.38.2.2 matt break;
1401 1.38.2.2 matt }
1402 1.38.2.2 matt
1403 1.38.2.2 matt /* The length of the first mbuf is computed last. */
1404 1.38.2.2 matt if (count != 0) {
1405 1.38.2.2 matt mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
1406 1.38.2.2 matt pktlen += mp->m_len;
1407 1.38.2.2 matt }
1408 1.38.2.2 matt
1409 1.38.2.2 matt /* Chain received mbufs. */
1410 1.38.2.2 matt if (sc->age_cdata.age_rxhead == NULL) {
1411 1.38.2.2 matt sc->age_cdata.age_rxhead = mp;
1412 1.38.2.2 matt sc->age_cdata.age_rxtail = mp;
1413 1.38.2.2 matt } else {
1414 1.38.2.2 matt mp->m_flags &= ~M_PKTHDR;
1415 1.38.2.2 matt sc->age_cdata.age_rxprev_tail =
1416 1.38.2.2 matt sc->age_cdata.age_rxtail;
1417 1.38.2.2 matt sc->age_cdata.age_rxtail->m_next = mp;
1418 1.38.2.2 matt sc->age_cdata.age_rxtail = mp;
1419 1.38.2.2 matt }
1420 1.38.2.2 matt
1421 1.38.2.2 matt if (count == nsegs - 1) {
1422 1.38.2.2 matt /*
1423 1.38.2.2 matt * It seems that the L1 controller has no way to tell
1424 1.38.2.2 matt * the hardware to strip the CRC bytes.
1425 1.38.2.2 matt */
1426 1.38.2.2 matt sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
1427 1.38.2.2 matt if (nsegs > 1) {
1428 1.38.2.2 matt /* Remove the CRC bytes in chained mbufs. */
1429 1.38.2.2 matt pktlen -= ETHER_CRC_LEN;
1430 1.38.2.2 matt if (mp->m_len <= ETHER_CRC_LEN) {
1431 1.38.2.2 matt sc->age_cdata.age_rxtail =
1432 1.38.2.2 matt sc->age_cdata.age_rxprev_tail;
1433 1.38.2.2 matt sc->age_cdata.age_rxtail->m_len -=
1434 1.38.2.2 matt (ETHER_CRC_LEN - mp->m_len);
1435 1.38.2.2 matt sc->age_cdata.age_rxtail->m_next = NULL;
1436 1.38.2.2 matt m_freem(mp);
1437 1.38.2.2 matt } else {
1438 1.38.2.2 matt mp->m_len -= ETHER_CRC_LEN;
1439 1.38.2.2 matt }
1440 1.38.2.2 matt }
1441 1.38.2.2 matt
1442 1.38.2.2 matt m = sc->age_cdata.age_rxhead;
1443 1.38.2.2 matt m->m_flags |= M_PKTHDR;
1444 1.38.2.2 matt m->m_pkthdr.rcvif = ifp;
1445 1.38.2.2 matt m->m_pkthdr.len = sc->age_cdata.age_rxlen;
1446 1.38.2.2 matt /* Set the first mbuf length; pktlen already covers the chained mbufs. */
1447 1.38.2.2 matt m->m_len = sc->age_cdata.age_rxlen - pktlen;
1448 1.38.2.2 matt
1449 1.38.2.2 matt /*
1450 1.38.2.2 matt * Set checksum information.
1451 1.38.2.2 matt * It seems that the L1 controller can compute a partial
1452 1.38.2.2 matt * checksum. The partial checksum value could be used
1453 1.38.2.2 matt * to accelerate checksum computation for fragmented
1454 1.38.2.2 matt * TCP/UDP packets; the upper network stack already
1455 1.38.2.2 matt * takes advantage of the partial checksum value in the
1456 1.38.2.2 matt * IP reassembly stage. But I'm not sure about the
1457 1.38.2.2 matt * correctness of the partial hardware checksum
1458 1.38.2.2 matt * assistance due to the lack of a data sheet. If it is
1459 1.38.2.2 matt * proven to work on the L1 I'll enable it.
1460 1.38.2.2 matt */
1461 1.38.2.2 matt if (status & AGE_RRD_IPV4) {
1462 1.38.2.2 matt if (status & AGE_RRD_IPCSUM_NOK)
1463 1.38.2.2 matt m->m_pkthdr.csum_flags |=
1464 1.38.2.2 matt M_CSUM_IPv4_BAD;
1465 1.38.2.2 matt if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
1466 1.38.2.2 matt (status & AGE_RRD_TCP_UDPCSUM_NOK)) {
1467 1.38.2.2 matt m->m_pkthdr.csum_flags |=
1468 1.38.2.2 matt M_CSUM_TCP_UDP_BAD;
1469 1.38.2.2 matt }
1470 1.38.2.2 matt /*
1471 1.38.2.2 matt * Don't mark a bad checksum for TCP/UDP frames,
1472 1.38.2.2 matt * as fragmented frames may always have the
1473 1.38.2.2 matt * bad-checksum bit set in the descriptor status.
1474 1.38.2.2 matt */
1475 1.38.2.2 matt }
1476 1.38.2.2 matt #if NVLAN > 0
1477 1.38.2.2 matt /* Check for VLAN tagged frames. */
1478 1.38.2.2 matt if (status & AGE_RRD_VLAN) {
1479 1.38.2.2 matt uint32_t vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
1480 1.38.2.2 matt VLAN_INPUT_TAG(ifp, m, AGE_RX_VLAN_TAG(vtag),
1481 1.38.2.2 matt continue);
1482 1.38.2.2 matt }
1483 1.38.2.2 matt #endif
1484 1.38.2.2 matt
1485 1.38.2.2 matt #if NBPFILTER > 0
1486 1.38.2.2 matt if (ifp->if_bpf)
1487 1.38.2.2 matt bpf_mtap(ifp->if_bpf, m);
1488 1.38.2.2 matt #endif
1489 1.38.2.2 matt /* Pass it on. */
1490 1.38.2.2 matt ether_input(ifp, m);
1491 1.38.2.2 matt
1492 1.38.2.2 matt /* Reset mbuf chains. */
1493 1.38.2.2 matt AGE_RXCHAIN_RESET(sc);
1494 1.38.2.2 matt }
1495 1.38.2.2 matt }
1496 1.38.2.2 matt
1497 1.38.2.2 matt if (count != nsegs) {
1498 1.38.2.2 matt sc->age_cdata.age_rx_cons += nsegs;
1499 1.38.2.2 matt sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
1500 1.38.2.2 matt } else
1501 1.38.2.2 matt sc->age_cdata.age_rx_cons = rx_cons;
1502 1.38.2.2 matt }
1503 1.38.2.2 matt
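/*
 * Rx interrupt: drain the Rx return ring from the driver's consumer
 * index up to the producer index passed in (rr_prod), handing each
 * completed frame to age_rxeof().
 */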
1504 1.38.2.2 matt static void
1505 1.38.2.2 matt age_rxintr(struct age_softc *sc, int rr_prod)
1506 1.38.2.2 matt {
1507 1.38.2.2 matt struct rx_rdesc *rxrd;
1508 1.38.2.2 matt int rr_cons, nsegs, pktlen, prog;
1509 1.38.2.2 matt
1510 1.38.2.2 matt rr_cons = sc->age_cdata.age_rr_cons;
1511 1.38.2.2 matt if (rr_cons == rr_prod)
1512 1.38.2.2 matt return;
1513 1.38.2.2 matt
1514 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1515 1.38.2.2 matt sc->age_cdata.age_rr_ring_map->dm_mapsize,
1516 1.38.2.2 matt BUS_DMASYNC_POSTREAD);
1517 1.38.2.2 matt
1518 1.38.2.2 matt for (prog = 0; rr_cons != rr_prod; prog++) {
1519 1.38.2.2 matt rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
1520 1.38.2.2 matt nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
1521 1.38.2.2 matt if (nsegs == 0)
1522 1.38.2.2 matt break;
1523 1.38.2.2 matt /*
1524 1.38.2.2 matt * Check the number of segments against the received bytes.
1525 1.38.2.2 matt * A non-matching value would indicate that the hardware
1526 1.38.2.2 matt * is still trying to update the Rx return descriptors.
1527 1.38.2.2 matt * I'm not sure whether this check is really needed.
1528 1.38.2.2 matt */
1529 1.38.2.2 matt pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
1530 1.38.2.2 matt if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
1531 1.38.2.2 matt (MCLBYTES - ETHER_ALIGN)))
1532 1.38.2.2 matt break;
1533 1.38.2.2 matt
1534 1.38.2.2 matt /* Received a frame. */
1535 1.38.2.2 matt age_rxeof(sc, rxrd);
1536 1.38.2.2 matt
1537 1.38.2.2 matt /* Clear return ring. */
1538 1.38.2.2 matt rxrd->index = 0;
1539 1.38.2.2 matt AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
1540 1.38.2.2 matt }
1541 1.38.2.2 matt
1542 1.38.2.2 matt if (prog > 0) {
1543 1.38.2.2 matt /* Update the consumer index. */
1544 1.38.2.2 matt sc->age_cdata.age_rr_cons = rr_cons;
1545 1.38.2.2 matt
1546 1.38.2.2 matt /* Sync descriptors. */
1547 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
1548 1.38.2.2 matt sc->age_cdata.age_rr_ring_map->dm_mapsize,
1549 1.38.2.2 matt BUS_DMASYNC_PREWRITE);
1550 1.38.2.2 matt
1551 1.38.2.2 matt /* Notify the hardware that new Rx buffers are available. */
1552 1.38.2.2 matt AGE_COMMIT_MBOX(sc);
1553 1.38.2.2 matt }
1554 1.38.2.2 matt }
1555 1.38.2.2 matt
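/* Once-a-second callout: run the MII tick and reschedule ourselves. */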
1556 1.38.2.2 matt static void
1557 1.38.2.2 matt age_tick(void *xsc)
1558 1.38.2.2 matt {
1559 1.38.2.2 matt struct age_softc *sc = xsc;
1560 1.38.2.2 matt struct mii_data *mii = &sc->sc_miibus;
1561 1.38.2.2 matt int s;
1562 1.38.2.2 matt
1563 1.38.2.2 matt s = splnet();
1564 1.38.2.2 matt mii_tick(mii);
1565 1.38.2.2 matt splx(s);
1566 1.38.2.2 matt
1567 1.38.2.2 matt callout_schedule(&sc->sc_tick_ch, hz);
1568 1.38.2.2 matt }
1569 1.38.2.2 matt
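/* Issue a master reset and spin until the controller reports idle. */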
1570 1.38.2.2 matt static void
1571 1.38.2.2 matt age_reset(struct age_softc *sc)
1572 1.38.2.2 matt {
1573 1.38.2.2 matt uint32_t reg;
1574 1.38.2.2 matt int i;
1575 1.38.2.2 matt
1576 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
1577 1.38.2.2 matt CSR_READ_4(sc, AGE_MASTER_CFG);
1578 1.38.2.2 matt DELAY(1000);
1579 1.38.2.2 matt for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1580 1.38.2.2 matt if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1581 1.38.2.2 matt break;
1582 1.38.2.2 matt DELAY(10);
1583 1.38.2.2 matt }
1584 1.38.2.2 matt
1585 1.38.2.2 matt if (i == 0)
1586 1.38.2.2 matt printf("%s: reset timed out (0x%08x)!\n", device_xname(sc->sc_dev),
1587 1.38.2.2 matt reg);
1588 1.38.2.2 matt
1589 1.38.2.2 matt /* Initialize PCIe module. From Linux. */
1590 1.38.2.2 matt CSR_WRITE_4(sc, 0x12FC, 0x6500);
1591 1.38.2.2 matt CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1592 1.38.2.2 matt }
1593 1.38.2.2 matt
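/*
 * Bring the interface up: stop and reset the chip, initialize all
 * descriptor rings and the CMB/SMB blocks, program the MAC and DMA
 * registers, and finally enable the Tx/Rx MACs.
 */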
1594 1.38.2.2 matt static int
1595 1.38.2.2 matt age_init(struct ifnet *ifp)
1596 1.38.2.2 matt {
1597 1.38.2.2 matt struct age_softc *sc = ifp->if_softc;
1598 1.38.2.2 matt struct mii_data *mii;
1599 1.38.2.2 matt uint8_t eaddr[ETHER_ADDR_LEN];
1600 1.38.2.2 matt bus_addr_t paddr;
1601 1.38.2.2 matt uint32_t reg, fsize;
1602 1.38.2.2 matt uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
1603 1.38.2.2 matt int error;
1604 1.38.2.2 matt
1605 1.38.2.2 matt /*
1606 1.38.2.2 matt * Cancel any pending I/O.
1607 1.38.2.2 matt */
1608 1.38.2.2 matt age_stop(ifp, 0);
1609 1.38.2.2 matt
1610 1.38.2.2 matt /*
1611 1.38.2.2 matt * Reset the chip to a known state.
1612 1.38.2.2 matt */
1613 1.38.2.2 matt age_reset(sc);
1614 1.38.2.2 matt
1615 1.38.2.2 matt /* Initialize descriptors. */
1616 1.38.2.2 matt error = age_init_rx_ring(sc);
1617 1.38.2.2 matt if (error != 0) {
1618 1.38.2.2 matt printf("%s: no memory for Rx buffers.\n", device_xname(sc->sc_dev));
1619 1.38.2.2 matt age_stop(ifp, 0);
1620 1.38.2.2 matt return error;
1621 1.38.2.2 matt }
1622 1.38.2.2 matt age_init_rr_ring(sc);
1623 1.38.2.2 matt age_init_tx_ring(sc);
1624 1.38.2.2 matt age_init_cmb_block(sc);
1625 1.38.2.2 matt age_init_smb_block(sc);
1626 1.38.2.2 matt
1627 1.38.2.2 matt /* Reprogram the station address. */
1628 1.38.2.2 matt memcpy(eaddr, CLLADDR(ifp->if_sadl), sizeof(eaddr));
1629 1.38.2.2 matt CSR_WRITE_4(sc, AGE_PAR0,
1630 1.38.2.2 matt eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
1631 1.38.2.2 matt CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
1632 1.38.2.2 matt
1633 1.38.2.2 matt /* Set descriptor base addresses. */
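/*
 * Note: only the one high 32-bit base register (AGE_DESC_ADDR_HI) is
 * programmed here, so all rings and blocks presumably need to reside
 * within the same 4GB region of bus address space.
 */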
1634 1.38.2.2 matt paddr = sc->age_rdata.age_tx_ring_paddr;
1635 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
1636 1.38.2.2 matt paddr = sc->age_rdata.age_rx_ring_paddr;
1637 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
1638 1.38.2.2 matt paddr = sc->age_rdata.age_rr_ring_paddr;
1639 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
1640 1.38.2.2 matt paddr = sc->age_rdata.age_tx_ring_paddr;
1641 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
1642 1.38.2.2 matt paddr = sc->age_rdata.age_cmb_block_paddr;
1643 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
1644 1.38.2.2 matt paddr = sc->age_rdata.age_smb_block_paddr;
1645 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
1646 1.38.2.2 matt
1647 1.38.2.2 matt /* Set Rx/Rx return descriptor counter. */
1648 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
1649 1.38.2.2 matt ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
1650 1.38.2.2 matt DESC_RRD_CNT_MASK) |
1651 1.38.2.2 matt ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
1652 1.38.2.2 matt
1653 1.38.2.2 matt /* Set Tx descriptor counter. */
1654 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
1655 1.38.2.2 matt (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
1656 1.38.2.2 matt
1657 1.38.2.2 matt /* Tell hardware that we're ready to load descriptors. */
1658 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
1659 1.38.2.2 matt
1660 1.38.2.2 matt /*
1661 1.38.2.2 matt * Initialize the mailbox register.
1662 1.38.2.2 matt * Updated producer/consumer index information is exchanged
1663 1.38.2.2 matt * through this mailbox register. However, the Tx producer and
1664 1.38.2.2 matt * the Rx return consumer/Rx producer all share it, so it's
1665 1.38.2.2 matt * hard to separate the Tx and Rx code paths without locking.
1666 1.38.2.2 matt * If the L1 hardware had a separate mailbox register for Tx
1667 1.38.2.2 matt * and Rx consumer/producer management, we could have
1668 1.38.2.2 matt * independent Tx/Rx handlers, and the Rx handler in turn
1669 1.38.2.2 matt * could run without any locking.
1670 1.38.2.2 matt */
1671 1.38.2.2 matt AGE_COMMIT_MBOX(sc);
1672 1.38.2.2 matt
1673 1.38.2.2 matt /* Configure IPG/IFG parameters. */
1674 1.38.2.2 matt CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
1675 1.38.2.2 matt ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
1676 1.38.2.2 matt ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
1677 1.38.2.2 matt ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
1678 1.38.2.2 matt ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
1679 1.38.2.2 matt
1680 1.38.2.2 matt /* Set parameters for half-duplex media. */
1681 1.38.2.2 matt CSR_WRITE_4(sc, AGE_HDPX_CFG,
1682 1.38.2.2 matt ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
1683 1.38.2.2 matt HDPX_CFG_LCOL_MASK) |
1684 1.38.2.2 matt ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
1685 1.38.2.2 matt HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
1686 1.38.2.2 matt ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
1687 1.38.2.2 matt HDPX_CFG_ABEBT_MASK) |
1688 1.38.2.2 matt ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
1689 1.38.2.2 matt HDPX_CFG_JAMIPG_MASK));
1690 1.38.2.2 matt
1691 1.38.2.2 matt /* Configure interrupt moderation timer. */
1692 1.38.2.2 matt sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
1693 1.38.2.2 matt CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
1694 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_MASTER_CFG);
1695 1.38.2.2 matt reg &= ~MASTER_MTIMER_ENB;
1696 1.38.2.2 matt if (AGE_USECS(sc->age_int_mod) == 0)
1697 1.38.2.2 matt reg &= ~MASTER_ITIMER_ENB;
1698 1.38.2.2 matt else
1699 1.38.2.2 matt reg |= MASTER_ITIMER_ENB;
1700 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
1701 1.38.2.2 matt if (agedebug)
1702 1.38.2.2 matt printf("%s: interrupt moderation is %d us.\n",
1703 1.38.2.2 matt device_xname(sc->sc_dev), sc->age_int_mod);
1704 1.38.2.2 matt CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
1705 1.38.2.2 matt
1706 1.38.2.2 matt /* Set the maximum frame size, but don't let the MTU be less than ETHERMTU. */
1707 1.38.2.2 matt if (ifp->if_mtu < ETHERMTU)
1708 1.38.2.2 matt sc->age_max_frame_size = ETHERMTU;
1709 1.38.2.2 matt else
1710 1.38.2.2 matt sc->age_max_frame_size = ifp->if_mtu;
1711 1.38.2.2 matt sc->age_max_frame_size += ETHER_HDR_LEN +
1712 1.38.2.2 matt sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
1713 1.38.2.2 matt CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
1714 1.38.2.2 matt
1715 1.38.2.2 matt /* Configure jumbo frame. */
1716 1.38.2.2 matt fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
1717 1.38.2.2 matt CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
1718 1.38.2.2 matt (((fsize / sizeof(uint64_t)) <<
1719 1.38.2.2 matt RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
1720 1.38.2.2 matt ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
1721 1.38.2.2 matt RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
1722 1.38.2.2 matt ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
1723 1.38.2.2 matt RXQ_JUMBO_CFG_RRD_TIMER_MASK));
1724 1.38.2.2 matt
1725 1.38.2.2 matt /* Configure flow-control parameters. From Linux. */
1726 1.38.2.2 matt if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
1727 1.38.2.2 matt /*
1728 1.38.2.2 matt * Magic workaround for the old L1.
1729 1.38.2.2 matt * We don't know which hardware revision requires this magic.
1730 1.38.2.2 matt */
1731 1.38.2.2 matt CSR_WRITE_4(sc, 0x12FC, 0x6500);
1732 1.38.2.2 matt /*
1733 1.38.2.2 matt * Another magic workaround for flow-control mode
1734 1.38.2.2 matt * change. From Linux.
1735 1.38.2.2 matt */
1736 1.38.2.2 matt CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
1737 1.38.2.2 matt }
1738 1.38.2.2 matt /*
1739 1.38.2.2 matt * TODO
1740 1.38.2.2 matt * Understand the relationship between the pause parameters, the
1741 1.38.2.2 matt * FIFO size, and the number of Rx and Rx return descriptors.
1742 1.38.2.2 matt *
1743 1.38.2.2 matt * The magic parameters came from Linux.
1744 1.38.2.2 matt */
1745 1.38.2.2 matt switch (sc->age_chip_rev) {
1746 1.38.2.2 matt case 0x8001:
1747 1.38.2.2 matt case 0x9001:
1748 1.38.2.2 matt case 0x9002:
1749 1.38.2.2 matt case 0x9003:
1750 1.38.2.2 matt rxf_hi = AGE_RX_RING_CNT / 16;
1751 1.38.2.2 matt rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
1752 1.38.2.2 matt rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
1753 1.38.2.2 matt rrd_lo = AGE_RR_RING_CNT / 16;
1754 1.38.2.2 matt break;
1755 1.38.2.2 matt default:
1756 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
1757 1.38.2.2 matt rxf_lo = reg / 16;
1758 1.38.2.2 matt if (rxf_lo < 192)
1759 1.38.2.2 matt rxf_lo = 192;
1760 1.38.2.2 matt rxf_hi = (reg * 7) / 8;
1761 1.38.2.2 matt if (rxf_hi < rxf_lo)
1762 1.38.2.2 matt rxf_hi = rxf_lo + 16;
1763 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
1764 1.38.2.2 matt rrd_lo = reg / 8;
1765 1.38.2.2 matt rrd_hi = (reg * 7) / 8;
1766 1.38.2.2 matt if (rrd_lo < 2)
1767 1.38.2.2 matt rrd_lo = 2;
1768 1.38.2.2 matt if (rrd_hi < rrd_lo)
1769 1.38.2.2 matt rrd_hi = rrd_lo + 3;
1770 1.38.2.2 matt break;
1771 1.38.2.2 matt }
1772 1.38.2.2 matt CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
1773 1.38.2.2 matt ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
1774 1.38.2.2 matt RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
1775 1.38.2.2 matt ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
1776 1.38.2.2 matt RXQ_FIFO_PAUSE_THRESH_HI_MASK));
1777 1.38.2.2 matt CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
1778 1.38.2.2 matt ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
1779 1.38.2.2 matt RXQ_RRD_PAUSE_THRESH_LO_MASK) |
1780 1.38.2.2 matt ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
1781 1.38.2.2 matt RXQ_RRD_PAUSE_THRESH_HI_MASK));
1782 1.38.2.2 matt
1783 1.38.2.2 matt /* Configure RxQ. */
1784 1.38.2.2 matt CSR_WRITE_4(sc, AGE_RXQ_CFG,
1785 1.38.2.2 matt ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
1786 1.38.2.2 matt RXQ_CFG_RD_BURST_MASK) |
1787 1.38.2.2 matt ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
1788 1.38.2.2 matt RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
1789 1.38.2.2 matt ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
1790 1.38.2.2 matt RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
1791 1.38.2.2 matt RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
1792 1.38.2.2 matt
1793 1.38.2.2 matt /* Configure TxQ. */
1794 1.38.2.2 matt CSR_WRITE_4(sc, AGE_TXQ_CFG,
1795 1.38.2.2 matt ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
1796 1.38.2.2 matt TXQ_CFG_TPD_BURST_MASK) |
1797 1.38.2.2 matt ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
1798 1.38.2.2 matt TXQ_CFG_TX_FIFO_BURST_MASK) |
1799 1.38.2.2 matt ((TXQ_CFG_TPD_FETCH_DEFAULT <<
1800 1.38.2.2 matt TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
1801 1.38.2.2 matt TXQ_CFG_ENB);
1802 1.38.2.2 matt
1803 1.38.2.2 matt /* Configure DMA parameters. */
1804 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DMA_CFG,
1805 1.38.2.2 matt DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
1806 1.38.2.2 matt sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
1807 1.38.2.2 matt sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
1808 1.38.2.2 matt
1809 1.38.2.2 matt /* Configure CMB DMA write threshold. */
1810 1.38.2.2 matt CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
1811 1.38.2.2 matt ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
1812 1.38.2.2 matt CMB_WR_THRESH_RRD_MASK) |
1813 1.38.2.2 matt ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
1814 1.38.2.2 matt CMB_WR_THRESH_TPD_MASK));
1815 1.38.2.2 matt
1816 1.38.2.2 matt /* Set CMB/SMB timer and enable them. */
1817 1.38.2.2 matt CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
1818 1.38.2.2 matt ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
1819 1.38.2.2 matt ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
1820 1.38.2.2 matt
1821 1.38.2.2 matt /* Request SMB updates every second. */
1822 1.38.2.2 matt CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
1823 1.38.2.2 matt CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
1824 1.38.2.2 matt
1825 1.38.2.2 matt /*
1826 1.38.2.2 matt * Disable all WOL bits, as WOL can interfere with normal Rx
1827 1.38.2.2 matt * operation.
1828 1.38.2.2 matt */
1829 1.38.2.2 matt CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
1830 1.38.2.2 matt
1831 1.38.2.2 matt /*
1832 1.38.2.2 matt * Configure the Tx/Rx MACs.
1833 1.38.2.2 matt * - Auto-padding for short frames.
1834 1.38.2.2 matt * - Enable CRC generation.
1835 1.38.2.2 matt * Start with full-duplex/1000Mbps media. The MAC is actually
1836 1.38.2.2 matt * reconfigured after the link is established.
1837 1.38.2.2 matt */
1838 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG,
1839 1.38.2.2 matt MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
1840 1.38.2.2 matt MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
1841 1.38.2.2 matt ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
1842 1.38.2.2 matt MAC_CFG_PREAMBLE_MASK));
1843 1.38.2.2 matt
1844 1.38.2.2 matt /* Set up the receive filter. */
1845 1.38.2.2 matt age_rxfilter(sc);
1846 1.38.2.2 matt age_rxvlan(sc);
1847 1.38.2.2 matt
1848 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_MAC_CFG);
1849 1.38.2.2 matt reg |= MAC_CFG_RXCSUM_ENB;
1850 1.38.2.2 matt
1851 1.38.2.2 matt /* Ack and clear all pending interrupts, then enable interrupts. */
1852 1.38.2.2 matt CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
1853 1.38.2.2 matt CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
1854 1.38.2.2 matt
1855 1.38.2.2 matt /* Finally enable Tx/Rx MAC. */
1856 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
1857 1.38.2.2 matt
1858 1.38.2.2 matt sc->age_flags &= ~AGE_FLAG_LINK;
1859 1.38.2.2 matt
1860 1.38.2.2 matt /* Switch to the current media. */
1861 1.38.2.2 matt mii = &sc->sc_miibus;
1862 1.38.2.2 matt mii_mediachg(mii);
1863 1.38.2.2 matt
1864 1.38.2.2 matt callout_schedule(&sc->sc_tick_ch, hz);
1865 1.38.2.2 matt
1866 1.38.2.2 matt ifp->if_flags |= IFF_RUNNING;
1867 1.38.2.2 matt ifp->if_flags &= ~IFF_OACTIVE;
1868 1.38.2.2 matt
1869 1.38.2.2 matt return 0;
1870 1.38.2.2 matt }
1871 1.38.2.2 matt
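/*
 * Stop the interface: disable interrupts, CMB/SMB updates, the MACs
 * and the DMA engines, then release any mbufs still held by the Rx
 * chain and the Rx/Tx rings.
 */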
1872 1.38.2.2 matt static void
1873 1.38.2.2 matt age_stop(struct ifnet *ifp, int disable)
1874 1.38.2.2 matt {
1875 1.38.2.2 matt struct age_softc *sc = ifp->if_softc;
1876 1.38.2.2 matt struct age_txdesc *txd;
1877 1.38.2.2 matt struct age_rxdesc *rxd;
1878 1.38.2.2 matt uint32_t reg;
1879 1.38.2.2 matt int i;
1880 1.38.2.2 matt
1881 1.38.2.2 matt callout_stop(&sc->sc_tick_ch);
1882 1.38.2.2 matt
1883 1.38.2.2 matt /*
1884 1.38.2.2 matt * Mark the interface down and cancel the watchdog timer.
1885 1.38.2.2 matt */
1886 1.38.2.2 matt ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1887 1.38.2.2 matt ifp->if_timer = 0;
1888 1.38.2.2 matt
1889 1.38.2.2 matt sc->age_flags &= ~AGE_FLAG_LINK;
1890 1.38.2.2 matt
1891 1.38.2.2 matt mii_down(&sc->sc_miibus);
1892 1.38.2.2 matt
1893 1.38.2.2 matt /*
1894 1.38.2.2 matt * Disable interrupts.
1895 1.38.2.2 matt */
1896 1.38.2.2 matt CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
1897 1.38.2.2 matt CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
1898 1.38.2.2 matt
1899 1.38.2.2 matt /* Stop CMB/SMB updates. */
1900 1.38.2.2 matt CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
1901 1.38.2.2 matt
1902 1.38.2.2 matt /* Stop Rx/Tx MAC. */
1903 1.38.2.2 matt age_stop_rxmac(sc);
1904 1.38.2.2 matt age_stop_txmac(sc);
1905 1.38.2.2 matt
1906 1.38.2.2 matt /* Stop DMA. */
1907 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DMA_CFG,
1908 1.38.2.2 matt CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
1909 1.38.2.2 matt
1910 1.38.2.2 matt /* Stop TxQ/RxQ. */
1911 1.38.2.2 matt CSR_WRITE_4(sc, AGE_TXQ_CFG,
1912 1.38.2.2 matt CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
1913 1.38.2.2 matt CSR_WRITE_4(sc, AGE_RXQ_CFG,
1914 1.38.2.2 matt CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
1915 1.38.2.2 matt for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
1916 1.38.2.2 matt if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
1917 1.38.2.2 matt break;
1918 1.38.2.2 matt DELAY(10);
1919 1.38.2.2 matt }
1920 1.38.2.2 matt if (i == 0)
1921 1.38.2.2 matt printf("%s: stopping Rx/Tx MACs timed out (0x%08x)!\n",
1922 1.38.2.2 matt device_xname(sc->sc_dev), reg);
1923 1.38.2.2 matt
1924 1.38.2.2 matt /* Reclaim Rx buffers that have been processed. */
1925 1.38.2.2 matt if (sc->age_cdata.age_rxhead != NULL)
1926 1.38.2.2 matt m_freem(sc->age_cdata.age_rxhead);
1927 1.38.2.2 matt AGE_RXCHAIN_RESET(sc);
1928 1.38.2.2 matt
1929 1.38.2.2 matt /*
1930 1.38.2.2 matt * Free RX and TX mbufs still in the queues.
1931 1.38.2.2 matt */
1932 1.38.2.2 matt for (i = 0; i < AGE_RX_RING_CNT; i++) {
1933 1.38.2.2 matt rxd = &sc->age_cdata.age_rxdesc[i];
1934 1.38.2.2 matt if (rxd->rx_m != NULL) {
1935 1.38.2.2 matt bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
1936 1.38.2.2 matt m_freem(rxd->rx_m);
1937 1.38.2.2 matt rxd->rx_m = NULL;
1938 1.38.2.2 matt }
1939 1.38.2.2 matt }
1940 1.38.2.2 matt for (i = 0; i < AGE_TX_RING_CNT; i++) {
1941 1.38.2.2 matt txd = &sc->age_cdata.age_txdesc[i];
1942 1.38.2.2 matt if (txd->tx_m != NULL) {
1943 1.38.2.2 matt bus_dmamap_unload(sc->sc_dmat, txd->tx_dmamap);
1944 1.38.2.2 matt m_freem(txd->tx_m);
1945 1.38.2.2 matt txd->tx_m = NULL;
1946 1.38.2.2 matt }
1947 1.38.2.2 matt }
1948 1.38.2.2 matt }
1949 1.38.2.2 matt
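/*
 * Fold the statistics message block (SMB) that the controller DMAs
 * into host memory into the driver's accumulated counters and the
 * ifnet statistics, then clear the updated flag.
 */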
1950 1.38.2.2 matt static void
1951 1.38.2.2 matt age_stats_update(struct age_softc *sc)
1952 1.38.2.2 matt {
1953 1.38.2.2 matt struct ifnet *ifp = &sc->sc_ec.ec_if;
1954 1.38.2.2 matt struct age_stats *stat;
1955 1.38.2.2 matt struct smb *smb;
1956 1.38.2.2 matt
1957 1.38.2.2 matt stat = &sc->age_stat;
1958 1.38.2.2 matt
1959 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
1960 1.38.2.2 matt sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1961 1.38.2.2 matt
1962 1.38.2.2 matt smb = sc->age_rdata.age_smb_block;
1963 1.38.2.2 matt if (smb->updated == 0)
1964 1.38.2.2 matt return;
1965 1.38.2.2 matt
1966 1.38.2.2 matt /* Rx stats. */
1967 1.38.2.2 matt stat->rx_frames += smb->rx_frames;
1968 1.38.2.2 matt stat->rx_bcast_frames += smb->rx_bcast_frames;
1969 1.38.2.2 matt stat->rx_mcast_frames += smb->rx_mcast_frames;
1970 1.38.2.2 matt stat->rx_pause_frames += smb->rx_pause_frames;
1971 1.38.2.2 matt stat->rx_control_frames += smb->rx_control_frames;
1972 1.38.2.2 matt stat->rx_crcerrs += smb->rx_crcerrs;
1973 1.38.2.2 matt stat->rx_lenerrs += smb->rx_lenerrs;
1974 1.38.2.2 matt stat->rx_bytes += smb->rx_bytes;
1975 1.38.2.2 matt stat->rx_runts += smb->rx_runts;
1976 1.38.2.2 matt stat->rx_fragments += smb->rx_fragments;
1977 1.38.2.2 matt stat->rx_pkts_64 += smb->rx_pkts_64;
1978 1.38.2.2 matt stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
1979 1.38.2.2 matt stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
1980 1.38.2.2 matt stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
1981 1.38.2.2 matt stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
1982 1.38.2.2 matt stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
1983 1.38.2.2 matt stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
1984 1.38.2.2 matt stat->rx_pkts_truncated += smb->rx_pkts_truncated;
1985 1.38.2.2 matt stat->rx_fifo_oflows += smb->rx_fifo_oflows;
1986 1.38.2.2 matt stat->rx_desc_oflows += smb->rx_desc_oflows;
1987 1.38.2.2 matt stat->rx_alignerrs += smb->rx_alignerrs;
1988 1.38.2.2 matt stat->rx_bcast_bytes += smb->rx_bcast_bytes;
1989 1.38.2.2 matt stat->rx_mcast_bytes += smb->rx_mcast_bytes;
1990 1.38.2.2 matt stat->rx_pkts_filtered += smb->rx_pkts_filtered;
1991 1.38.2.2 matt
1992 1.38.2.2 matt /* Tx stats. */
1993 1.38.2.2 matt stat->tx_frames += smb->tx_frames;
1994 1.38.2.2 matt stat->tx_bcast_frames += smb->tx_bcast_frames;
1995 1.38.2.2 matt stat->tx_mcast_frames += smb->tx_mcast_frames;
1996 1.38.2.2 matt stat->tx_pause_frames += smb->tx_pause_frames;
1997 1.38.2.2 matt stat->tx_excess_defer += smb->tx_excess_defer;
1998 1.38.2.2 matt stat->tx_control_frames += smb->tx_control_frames;
1999 1.38.2.2 matt stat->tx_deferred += smb->tx_deferred;
2000 1.38.2.2 matt stat->tx_bytes += smb->tx_bytes;
2001 1.38.2.2 matt stat->tx_pkts_64 += smb->tx_pkts_64;
2002 1.38.2.2 matt stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
2003 1.38.2.2 matt stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
2004 1.38.2.2 matt stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
2005 1.38.2.2 matt stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
2006 1.38.2.2 matt stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
2007 1.38.2.2 matt stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
2008 1.38.2.2 matt stat->tx_single_colls += smb->tx_single_colls;
2009 1.38.2.2 matt stat->tx_multi_colls += smb->tx_multi_colls;
2010 1.38.2.2 matt stat->tx_late_colls += smb->tx_late_colls;
2011 1.38.2.2 matt stat->tx_excess_colls += smb->tx_excess_colls;
2012 1.38.2.2 matt stat->tx_underrun += smb->tx_underrun;
2013 1.38.2.2 matt stat->tx_desc_underrun += smb->tx_desc_underrun;
2014 1.38.2.2 matt stat->tx_lenerrs += smb->tx_lenerrs;
2015 1.38.2.2 matt stat->tx_pkts_truncated += smb->tx_pkts_truncated;
2016 1.38.2.2 matt stat->tx_bcast_bytes += smb->tx_bcast_bytes;
2017 1.38.2.2 matt stat->tx_mcast_bytes += smb->tx_mcast_bytes;
2018 1.38.2.2 matt
2019 1.38.2.2 matt /* Update counters in ifnet. */
2020 1.38.2.2 matt ifp->if_opackets += smb->tx_frames;
2021 1.38.2.2 matt
2022 1.38.2.2 matt ifp->if_collisions += smb->tx_single_colls +
2023 1.38.2.2 matt smb->tx_multi_colls + smb->tx_late_colls +
2024 1.38.2.2 matt smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;
2025 1.38.2.2 matt
2026 1.38.2.2 matt ifp->if_oerrors += smb->tx_excess_colls +
2027 1.38.2.2 matt smb->tx_late_colls + smb->tx_underrun +
2028 1.38.2.2 matt smb->tx_pkts_truncated;
2029 1.38.2.2 matt
2030 1.38.2.2 matt ifp->if_ipackets += smb->rx_frames;
2031 1.38.2.2 matt
2032 1.38.2.2 matt ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
2033 1.38.2.2 matt smb->rx_runts + smb->rx_pkts_truncated +
2034 1.38.2.2 matt smb->rx_fifo_oflows + smb->rx_desc_oflows +
2035 1.38.2.2 matt smb->rx_alignerrs;
2036 1.38.2.2 matt
2037 1.38.2.2 matt /* Update done; clear the updated flag. */
2038 1.38.2.2 matt smb->updated = 0;
2039 1.38.2.2 matt
2040 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2041 1.38.2.2 matt sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2042 1.38.2.2 matt }
2043 1.38.2.2 matt
2044 1.38.2.2 matt static void
2045 1.38.2.2 matt age_stop_txmac(struct age_softc *sc)
2046 1.38.2.2 matt {
2047 1.38.2.2 matt uint32_t reg;
2048 1.38.2.2 matt int i;
2049 1.38.2.2 matt
2050 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_MAC_CFG);
2051 1.38.2.2 matt if ((reg & MAC_CFG_TX_ENB) != 0) {
2052 1.38.2.2 matt reg &= ~MAC_CFG_TX_ENB;
2053 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2054 1.38.2.2 matt }
2055 1.38.2.2 matt /* Stop Tx DMA engine. */
2056 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_DMA_CFG);
2057 1.38.2.2 matt if ((reg & DMA_CFG_RD_ENB) != 0) {
2058 1.38.2.2 matt reg &= ~DMA_CFG_RD_ENB;
2059 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2060 1.38.2.2 matt }
2061 1.38.2.2 matt for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2062 1.38.2.2 matt if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2063 1.38.2.2 matt (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2064 1.38.2.2 matt break;
2065 1.38.2.2 matt DELAY(10);
2066 1.38.2.2 matt }
2067 1.38.2.2 matt if (i == 0)
2068 1.38.2.2 matt printf("%s: stopping TxMAC timed out!\n", device_xname(sc->sc_dev));
2069 1.38.2.2 matt }
2070 1.38.2.2 matt
2071 1.38.2.2 matt static void
2072 1.38.2.2 matt age_stop_rxmac(struct age_softc *sc)
2073 1.38.2.2 matt {
2074 1.38.2.2 matt uint32_t reg;
2075 1.38.2.2 matt int i;
2076 1.38.2.2 matt
2077 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_MAC_CFG);
2078 1.38.2.2 matt if ((reg & MAC_CFG_RX_ENB) != 0) {
2079 1.38.2.2 matt reg &= ~MAC_CFG_RX_ENB;
2080 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2081 1.38.2.2 matt }
2082 1.38.2.2 matt /* Stop Rx DMA engine. */
2083 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_DMA_CFG);
2084 1.38.2.2 matt if ((reg & DMA_CFG_WR_ENB) != 0) {
2085 1.38.2.2 matt reg &= ~DMA_CFG_WR_ENB;
2086 1.38.2.2 matt CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2087 1.38.2.2 matt }
2088 1.38.2.2 matt for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2089 1.38.2.2 matt if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2090 1.38.2.2 matt (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2091 1.38.2.2 matt break;
2092 1.38.2.2 matt DELAY(10);
2093 1.38.2.2 matt }
2094 1.38.2.2 matt if (i == 0)
2095 1.38.2.2 matt printf("%s: stopping RxMAC timed out!\n", device_xname(sc->sc_dev));
2096 1.38.2.2 matt }
2097 1.38.2.2 matt
2098 1.38.2.2 matt static void
2099 1.38.2.2 matt age_init_tx_ring(struct age_softc *sc)
2100 1.38.2.2 matt {
2101 1.38.2.2 matt struct age_ring_data *rd;
2102 1.38.2.2 matt struct age_txdesc *txd;
2103 1.38.2.2 matt int i;
2104 1.38.2.2 matt
2105 1.38.2.2 matt sc->age_cdata.age_tx_prod = 0;
2106 1.38.2.2 matt sc->age_cdata.age_tx_cons = 0;
2107 1.38.2.2 matt sc->age_cdata.age_tx_cnt = 0;
2108 1.38.2.2 matt
2109 1.38.2.2 matt rd = &sc->age_rdata;
2110 1.38.2.2 matt memset(rd->age_tx_ring, 0, AGE_TX_RING_SZ);
2111 1.38.2.2 matt for (i = 0; i < AGE_TX_RING_CNT; i++) {
2112 1.38.2.2 matt txd = &sc->age_cdata.age_txdesc[i];
2113 1.38.2.2 matt txd->tx_desc = &rd->age_tx_ring[i];
2114 1.38.2.2 matt txd->tx_m = NULL;
2115 1.38.2.2 matt }
2116 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_tx_ring_map, 0,
2117 1.38.2.2 matt sc->age_cdata.age_tx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2118 1.38.2.2 matt }
2119 1.38.2.2 matt
2120 1.38.2.2 matt static int
2121 1.38.2.2 matt age_init_rx_ring(struct age_softc *sc)
2122 1.38.2.2 matt {
2123 1.38.2.2 matt struct age_ring_data *rd;
2124 1.38.2.2 matt struct age_rxdesc *rxd;
2125 1.38.2.2 matt int i;
2126 1.38.2.2 matt
2127 1.38.2.2 matt sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
2128 1.38.2.2 matt rd = &sc->age_rdata;
2129 1.38.2.2 matt memset(rd->age_rx_ring, 0, AGE_RX_RING_SZ);
2130 1.38.2.2 matt for (i = 0; i < AGE_RX_RING_CNT; i++) {
2131 1.38.2.2 matt rxd = &sc->age_cdata.age_rxdesc[i];
2132 1.38.2.2 matt rxd->rx_m = NULL;
2133 1.38.2.2 matt rxd->rx_desc = &rd->age_rx_ring[i];
2134 1.38.2.2 matt if (age_newbuf(sc, rxd, 1) != 0)
2135 1.38.2.2 matt return ENOBUFS;
2136 1.38.2.2 matt }
2137 1.38.2.2 matt
2138 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rx_ring_map, 0,
2139 1.38.2.2 matt sc->age_cdata.age_rx_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2140 1.38.2.2 matt
2141 1.38.2.2 matt return 0;
2142 1.38.2.2 matt }
2143 1.38.2.2 matt
2144 1.38.2.2 matt static void
2145 1.38.2.2 matt age_init_rr_ring(struct age_softc *sc)
2146 1.38.2.2 matt {
2147 1.38.2.2 matt struct age_ring_data *rd;
2148 1.38.2.2 matt
2149 1.38.2.2 matt sc->age_cdata.age_rr_cons = 0;
2150 1.38.2.2 matt AGE_RXCHAIN_RESET(sc);
2151 1.38.2.2 matt
2152 1.38.2.2 matt rd = &sc->age_rdata;
2153 1.38.2.2 matt memset(rd->age_rr_ring, 0, AGE_RR_RING_SZ);
2154 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_rr_ring_map, 0,
2155 1.38.2.2 matt sc->age_cdata.age_rr_ring_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2156 1.38.2.2 matt }
2157 1.38.2.2 matt
2158 1.38.2.2 matt static void
2159 1.38.2.2 matt age_init_cmb_block(struct age_softc *sc)
2160 1.38.2.2 matt {
2161 1.38.2.2 matt struct age_ring_data *rd;
2162 1.38.2.2 matt
2163 1.38.2.2 matt rd = &sc->age_rdata;
2164 1.38.2.2 matt memset(rd->age_cmb_block, 0, AGE_CMB_BLOCK_SZ);
2165 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_cmb_block_map, 0,
2166 1.38.2.2 matt sc->age_cdata.age_cmb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2167 1.38.2.2 matt }
2168 1.38.2.2 matt
2169 1.38.2.2 matt static void
2170 1.38.2.2 matt age_init_smb_block(struct age_softc *sc)
2171 1.38.2.2 matt {
2172 1.38.2.2 matt struct age_ring_data *rd;
2173 1.38.2.2 matt
2174 1.38.2.2 matt rd = &sc->age_rdata;
2175 1.38.2.2 matt memset(rd->age_smb_block, 0, AGE_SMB_BLOCK_SZ);
2176 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, sc->age_cdata.age_smb_block_map, 0,
2177 1.38.2.2 matt sc->age_cdata.age_smb_block_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2178 1.38.2.2 matt }
2179 1.38.2.2 matt
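/*
 * Attach a fresh mbuf cluster to an Rx descriptor. The cluster is
 * loaded into the spare dmamap first; only on success is the old
 * buffer unloaded and the descriptor's map swapped with the spare,
 * so the ring never loses a buffer on allocation failure.
 */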
2180 1.38.2.2 matt static int
2181 1.38.2.2 matt age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd, int init)
2182 1.38.2.2 matt {
2183 1.38.2.2 matt struct rx_desc *desc;
2184 1.38.2.2 matt struct mbuf *m;
2185 1.38.2.2 matt bus_dmamap_t map;
2186 1.38.2.2 matt int error;
2187 1.38.2.2 matt
2188 1.38.2.2 matt MGETHDR(m, init ? M_WAITOK : M_DONTWAIT, MT_DATA);
2189 1.38.2.2 matt if (m == NULL)
2190 1.38.2.2 matt return ENOBUFS;
2191 1.38.2.2 matt MCLGET(m, init ? M_WAITOK : M_DONTWAIT);
2192 1.38.2.2 matt if (!(m->m_flags & M_EXT)) {
2193 1.38.2.2 matt m_freem(m);
2194 1.38.2.2 matt return ENOBUFS;
2195 1.38.2.2 matt }
2196 1.38.2.2 matt
2197 1.38.2.2 matt m->m_len = m->m_pkthdr.len = MCLBYTES;
2198 1.38.2.2 matt m_adj(m, ETHER_ALIGN);
2199 1.38.2.2 matt
2200 1.38.2.2 matt error = bus_dmamap_load_mbuf(sc->sc_dmat,
2201 1.38.2.2 matt sc->age_cdata.age_rx_sparemap, m, BUS_DMA_NOWAIT);
2202 1.38.2.2 matt
2203 1.38.2.2 matt if (error != 0) {
2211 1.38.2.2 matt m_freem(m);
2212 1.38.2.2 matt 
2213 1.38.2.2 matt if (init)
2214 1.38.2.2 matt printf("%s: can't load RX mbuf\n", device_xname(sc->sc_dev));
2215 1.38.2.2 matt return error;
2216 1.38.2.2 matt }
2217 1.38.2.2 matt
2218 1.38.2.2 matt if (rxd->rx_m != NULL) {
2219 1.38.2.2 matt bus_dmamap_sync(sc->sc_dmat, rxd->rx_dmamap, 0,
2220 1.38.2.2 matt rxd->rx_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2221 1.38.2.2 matt bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
2222 1.38.2.2 matt }
2223 1.38.2.2 matt map = rxd->rx_dmamap;
2224 1.38.2.2 matt rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
2225 1.38.2.2 matt sc->age_cdata.age_rx_sparemap = map;
2226 1.38.2.2 matt rxd->rx_m = m;
2227 1.38.2.2 matt
2228 1.38.2.2 matt desc = rxd->rx_desc;
2229 1.38.2.2 matt desc->addr = htole64(rxd->rx_dmamap->dm_segs[0].ds_addr);
2230 1.38.2.2 matt desc->len =
2231 1.38.2.2 matt htole32((rxd->rx_dmamap->dm_segs[0].ds_len & AGE_RD_LEN_MASK) <<
2232 1.38.2.2 matt AGE_RD_LEN_SHIFT);
2233 1.38.2.2 matt
2234 1.38.2.2 matt return 0;
2235 1.38.2.2 matt }
2236 1.38.2.2 matt
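/* Enable or disable hardware VLAN tag stripping to match the interface's capabilities. */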
2237 1.38.2.2 matt static void
2238 1.38.2.2 matt age_rxvlan(struct age_softc *sc)
2239 1.38.2.2 matt {
2240 1.38.2.2 matt uint32_t reg;
2241 1.38.2.2 matt
2242 1.38.2.2 matt reg = CSR_READ_4(sc, AGE_MAC_CFG);
2243 1.38.2.2 matt reg &= ~MAC_CFG_VLAN_TAG_STRIP;
2244 1.38.2.2 matt if (sc->sc_ec.ec_capabilities & ETHERCAP_VLAN_HWTAGGING)
2245 1.38.2.2 matt reg |= MAC_CFG_VLAN_TAG_STRIP;
2246 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2247 1.38.2.2 matt }
2248 1.38.2.2 matt
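/*
 * Program the receive filter. Multicast addresses are hashed with
 * ether_crc32_le(); the top bit of the CRC selects MAR0 or MAR1 and
 * the next five bits select the bit within that register.
 */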
2249 1.38.2.2 matt static void
2250 1.38.2.2 matt age_rxfilter(struct age_softc *sc)
2251 1.38.2.2 matt {
2252 1.38.2.2 matt struct ethercom *ec = &sc->sc_ec;
2253 1.38.2.2 matt struct ifnet *ifp = &sc->sc_ec.ec_if;
2254 1.38.2.2 matt struct ether_multi *enm;
2255 1.38.2.2 matt struct ether_multistep step;
2256 1.38.2.2 matt uint32_t crc;
2257 1.38.2.2 matt uint32_t mchash[2];
2258 1.38.2.2 matt uint32_t rxcfg;
2259 1.38.2.2 matt
2260 1.38.2.2 matt rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
2261 1.38.2.2 matt rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
2262 1.38.2.2 matt ifp->if_flags &= ~IFF_ALLMULTI;
2263 1.38.2.2 matt
2264 1.38.2.2 matt /*
2265 1.38.2.2 matt * Always accept broadcast frames.
2266 1.38.2.2 matt */
2267 1.38.2.2 matt rxcfg |= MAC_CFG_BCAST;
2268 1.38.2.2 matt
2269 1.38.2.2 matt if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
2270 1.38.2.2 matt ifp->if_flags |= IFF_ALLMULTI;
2271 1.38.2.2 matt if (ifp->if_flags & IFF_PROMISC)
2272 1.38.2.2 matt rxcfg |= MAC_CFG_PROMISC;
2273 1.38.2.2 matt else
2274 1.38.2.2 matt rxcfg |= MAC_CFG_ALLMULTI;
2275 1.38.2.2 matt mchash[0] = mchash[1] = 0xFFFFFFFF;
2276 1.38.2.2 matt } else {
2277 1.38.2.2 matt /* Program new filter. */
2278 1.38.2.2 matt memset(mchash, 0, sizeof(mchash));
2279 1.38.2.2 matt
2280 1.38.2.2 matt ETHER_FIRST_MULTI(step, ec, enm);
2281 1.38.2.2 matt while (enm != NULL) {
2282 1.38.2.2 matt crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
2283 1.38.2.2 matt mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
2284 1.38.2.2 matt ETHER_NEXT_MULTI(step, enm);
2285 1.38.2.2 matt }
2286 1.38.2.2 matt }
2287 1.38.2.2 matt
2288 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
2289 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
2290 1.38.2.2 matt CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
2291 1.38.2.2 matt }
2292