dwc_gmac.c revision 1.28.2.5 1 1.28.2.5 skrll /* $NetBSD: dwc_gmac.c,v 1.28.2.5 2016/06/12 08:33:29 skrll Exp $ */
2 1.18 jmcneill
3 1.1 martin /*-
4 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5 1.1 martin * All rights reserved.
6 1.1 martin *
7 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
8 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9 1.1 martin *
10 1.1 martin * Redistribution and use in source and binary forms, with or without
11 1.1 martin * modification, are permitted provided that the following conditions
12 1.1 martin * are met:
13 1.1 martin * 1. Redistributions of source code must retain the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer.
15 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 martin * notice, this list of conditions and the following disclaimer in the
17 1.1 martin * documentation and/or other materials provided with the distribution.
18 1.1 martin *
19 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
30 1.1 martin */
31 1.1 martin
32 1.1 martin /*
33 1.1  martin  * This driver supports the Synopsys DesignWare GMAC core, as found
34 1.1 martin * on Allwinner A20 cores and others.
35 1.1 martin *
36 1.1 martin * Real documentation seems to not be available, the marketing product
37 1.1 martin * documents could be found here:
38 1.1 martin *
39 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40 1.1 martin */
41 1.1 martin
42 1.1 martin #include <sys/cdefs.h>
43 1.1 martin
44 1.28.2.5 skrll __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.28.2.5 2016/06/12 08:33:29 skrll Exp $");
45 1.7 martin
46 1.7 martin /* #define DWC_GMAC_DEBUG 1 */
47 1.1 martin
48 1.28.2.4 skrll #ifdef _KERNEL_OPT
49 1.1 martin #include "opt_inet.h"
50 1.28.2.4 skrll #include "opt_net_mpsafe.h"
51 1.28.2.4 skrll #endif
52 1.1 martin
53 1.1 martin #include <sys/param.h>
54 1.1 martin #include <sys/bus.h>
55 1.1 martin #include <sys/device.h>
56 1.1 martin #include <sys/intr.h>
57 1.1 martin #include <sys/systm.h>
58 1.1 martin #include <sys/sockio.h>
59 1.28.2.1 skrll #include <sys/cprng.h>
60 1.1 martin
61 1.1 martin #include <net/if.h>
62 1.1 martin #include <net/if_ether.h>
63 1.1 martin #include <net/if_media.h>
64 1.1 martin #include <net/bpf.h>
65 1.1 martin #ifdef INET
66 1.1 martin #include <netinet/if_inarp.h>
67 1.1 martin #endif
68 1.1 martin
69 1.1 martin #include <dev/mii/miivar.h>
70 1.1 martin
71 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
72 1.1 martin #include <dev/ic/dwc_gmac_var.h>
73 1.1 martin
74 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
75 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
76 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
77 1.1 martin
78 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
79 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
80 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
81 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
82 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
83 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
84 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
85 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
86 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
87 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
88 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
89 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
90 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
91 1.28.2.4 skrll static int dwc_gmac_init_locked(struct ifnet *ifp);
92 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
93 1.28.2.4 skrll static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
94 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
95 1.28.2.4 skrll static void dwc_gmac_start_locked(struct ifnet *ifp);
96 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
97 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
98 1.8 martin static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
99 1.8 martin static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
100 1.20 jmcneill static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
101 1.22 martin static int dwc_gmac_ifflags_cb(struct ethercom *);
102 1.22 martin static uint32_t bitrev32(uint32_t x);
103 1.1 martin
/*
 * TX descriptors live directly behind the AWGE_RX_RING_COUNT RX
 * descriptors in the shared descriptor DMA area; this yields the
 * byte offset of TX descriptor N within that area.
 */
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
/* Next TX ring index, wrapping (ring count must be a power of two). */
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

/* Byte offset of RX descriptor N (the RX ring starts at offset 0). */
#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
/* Next RX ring index, wrapping (ring count must be a power of two). */
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))



/* DMA interrupts enabled for normal operation (see attach). */
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

/* DMA interrupt status bits that are treated as error conditions. */
#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

/* MAC-level interrupts enabled at attach time. */
#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
126 1.1 martin
127 1.7 martin
128 1.7 martin #ifdef DWC_GMAC_DEBUG
129 1.7 martin static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
130 1.7 martin static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
131 1.11 martin static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
132 1.8 martin static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
133 1.10 martin static void dwc_dump_status(struct dwc_gmac_softc *sc);
134 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
135 1.7 martin #endif
136 1.7 martin
137 1.28.2.4 skrll #ifdef NET_MPSAFE
138 1.28.2.4 skrll #define DWCGMAC_MPSAFE 1
139 1.28.2.4 skrll #endif
140 1.28.2.4 skrll
/*
 * Attach the GMAC core: determine the MAC address (from a device
 * property if present, otherwise from the current filter registers,
 * with a random fallback), reset the chip, allocate the DMA rings,
 * hook up the network interface and MII layer, and finally enable
 * the MAC and DMA interrupts.
 *
 * mii_clk selects the MDIO clock divider (only the low 3 bits are used).
 * On failure before if_attach, the partially allocated rings are freed.
 */
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		/* registers hold the address in little-endian byte order */
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/* core lock plus per-ring locks for the MP-safe paths */
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* no PHY: fall back to a fixed manual media setting */
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	/* Attach the interface. */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
283 1.1 martin
284 1.1 martin
285 1.1 martin
286 1.1 martin static int
287 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
288 1.1 martin {
289 1.1 martin size_t cnt;
290 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
291 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
292 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
293 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
294 1.1 martin & GMAC_BUSMODE_RESET) == 0)
295 1.1 martin return 0;
296 1.1 martin delay(10);
297 1.1 martin }
298 1.1 martin
299 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
300 1.1 martin return EIO;
301 1.1 martin }
302 1.1 martin
303 1.1 martin static void
304 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
305 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
306 1.1 martin {
307 1.1 martin uint32_t lo, hi;
308 1.1 martin
309 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
310 1.1 martin | (enaddr[3] << 24);
311 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
312 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
313 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
314 1.1 martin }
315 1.1 martin
316 1.1 martin static int
317 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
318 1.1 martin {
319 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
320 1.6 martin uint16_t mii;
321 1.1 martin size_t cnt;
322 1.1 martin int rv = 0;
323 1.1 martin
324 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
325 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
326 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
327 1.6 martin | GMAC_MII_BUSY;
328 1.1 martin
329 1.1 martin mutex_enter(&sc->sc_mdio_lock);
330 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
331 1.1 martin
332 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
333 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
334 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
335 1.3 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
336 1.3 martin AWIN_GMAC_MAC_MIIDATA);
337 1.1 martin break;
338 1.1 martin }
339 1.1 martin delay(10);
340 1.1 martin }
341 1.1 martin
342 1.1 martin mutex_exit(&sc->sc_mdio_lock);
343 1.1 martin
344 1.1 martin return rv;
345 1.1 martin }
346 1.1 martin
347 1.1 martin static void
348 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
349 1.1 martin {
350 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
351 1.6 martin uint16_t mii;
352 1.1 martin size_t cnt;
353 1.1 martin
354 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
355 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
356 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
357 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
358 1.1 martin
359 1.1 martin mutex_enter(&sc->sc_mdio_lock);
360 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
361 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
362 1.1 martin
363 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
364 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
365 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
366 1.1 martin break;
367 1.1 martin delay(10);
368 1.1 martin }
369 1.28.2.2 skrll
370 1.1 martin mutex_exit(&sc->sc_mdio_lock);
371 1.1 martin }
372 1.1 martin
/*
 * Populate the RX ring: allocate an mbuf cluster and DMA map per
 * descriptor, chain the descriptors together (RXCHAIN mode), hand
 * ownership of each descriptor to the device, and point the DMA
 * engine at the start of the ring.
 *
 * Returns 0 on success or an errno; on failure everything allocated
 * so far is released via dwc_gmac_free_rx_ring().
 */
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			/* NULL marks the slot so free_rx_ring skips it */
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		/* chain to the following descriptor (wraps at ring end) */
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		/* device owns the descriptor from here on */
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}
449 1.1 martin
450 1.1 martin static void
451 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
452 1.1 martin struct dwc_gmac_rx_ring *ring)
453 1.1 martin {
454 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
455 1.1 martin int i;
456 1.1 martin
457 1.28.2.4 skrll mutex_enter(&ring->r_mtx);
458 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
459 1.1 martin desc = &sc->sc_rxq.r_desc[i];
460 1.1 martin desc->ddesc_cntl = htole32(
461 1.16 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
462 1.16 martin DDESC_CNTL_RXCHAIN);
463 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
464 1.1 martin }
465 1.1 martin
466 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
467 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
468 1.27 matt BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
469 1.1 martin
470 1.1 martin ring->r_cur = ring->r_next = 0;
471 1.11 martin /* reset DMA address to start of ring */
472 1.11 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
473 1.11 martin sc->sc_rxq.r_physaddr);
474 1.28.2.4 skrll mutex_exit(&ring->r_mtx);
475 1.1 martin }
476 1.1 martin
477 1.1 martin static int
478 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
479 1.1 martin {
480 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
481 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
482 1.1 martin int error, nsegs;
483 1.1 martin void *rings;
484 1.1 martin
485 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
486 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
487 1.1 martin if (error != 0) {
488 1.1 martin aprint_error_dev(sc->sc_dev,
489 1.1 martin "could not create desc DMA map\n");
490 1.1 martin sc->sc_dma_ring_map = NULL;
491 1.1 martin goto fail;
492 1.1 martin }
493 1.1 martin
494 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
495 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
496 1.1 martin if (error != 0) {
497 1.1 martin aprint_error_dev(sc->sc_dev,
498 1.1 martin "could not map DMA memory\n");
499 1.1 martin goto fail;
500 1.1 martin }
501 1.1 martin
502 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
503 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
504 1.1 martin if (error != 0) {
505 1.1 martin aprint_error_dev(sc->sc_dev,
506 1.1 martin "could not allocate DMA memory\n");
507 1.1 martin goto fail;
508 1.1 martin }
509 1.1 martin
510 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
511 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
512 1.1 martin if (error != 0) {
513 1.1 martin aprint_error_dev(sc->sc_dev,
514 1.1 martin "could not load desc DMA map\n");
515 1.1 martin goto fail;
516 1.1 martin }
517 1.1 martin
518 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
519 1.1 martin sc->sc_rxq.r_desc = rings;
520 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
521 1.1 martin
522 1.1 martin /* and next rings to the TX side */
523 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
524 1.28.2.2 skrll sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
525 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
526 1.1 martin
527 1.1 martin return 0;
528 1.1 martin
529 1.1 martin fail:
530 1.1 martin dwc_gmac_free_dma_rings(sc);
531 1.1 martin return error;
532 1.1 martin }
533 1.1 martin
/*
 * Tear down the shared descriptor DMA area in reverse order of
 * allocation: sync, unload the map, unmap the kernel VA, then free
 * the underlying segment.
 */
static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}
544 1.1 martin
/*
 * Release all RX buffers and their DMA maps.  Safe to call on a
 * partially initialized ring (slots with rd_map == NULL or
 * rd_m == NULL are skipped); a no-op if the descriptor area was
 * never allocated.
 */
static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/*
			 * NOTE(review): the sync length is the descriptor
			 * ring size, but rd_map covers a single MCLBYTES
			 * buffer — looks like it should be the map's own
			 * dm_mapsize; confirm against bus_dma(9).
			 */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    AWGE_RX_RING_COUNT
				*sizeof(struct dwc_gmac_dev_dmadesc),
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}
570 1.1 martin
/*
 * Initialize the TX ring: clear all descriptors, create one DMA map
 * per slot (up to AWGE_TX_RING_COUNT segments each), and pre-link
 * each descriptor's next pointer to form a circular chain.
 *
 * Returns 0 on success or an errno; on failure the maps created so
 * far are destroyed via dwc_gmac_free_tx_ring().
 */
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			/* NULL marks the slot so free_tx_ring skips it */
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		/* chain to the following descriptor (wraps at ring end) */
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}
608 1.1 martin
609 1.1 martin static void
610 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
611 1.1 martin {
612 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
613 1.1 martin if (end > start) {
614 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
615 1.1 martin TX_DESC_OFFSET(start),
616 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
617 1.1 martin ops);
618 1.1 martin return;
619 1.1 martin }
620 1.1 martin /* sync from 'start' to end of ring */
621 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
622 1.1 martin TX_DESC_OFFSET(start),
623 1.28.2.1 skrll TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
624 1.1 martin ops);
625 1.1 martin /* sync from start of ring to 'end' */
626 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
627 1.1 martin TX_DESC_OFFSET(0),
628 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
629 1.1 martin ops);
630 1.1 martin }
631 1.1 martin
/*
 * Abort all pending transmissions: unload and free any mbufs still
 * attached to TX slots, hand the (cleared) ring back to the device
 * and rewind the software indices.  The per-slot DMA maps survive.
 */
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/* td_active is the map this mbuf was loaded into */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	/* point the DMA engine back at the start of the TX ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}
663 1.1 martin
/*
 * Release the TX ring completely: unload any maps that still carry
 * an mbuf, free those mbufs, then destroy every per-slot DMA map.
 */
static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/*
			 * NOTE(review): syncs td_active but uses
			 * td_map->dm_mapsize for the length, while
			 * reset_tx_ring uses td_active->dm_mapsize —
			 * confirm which map the size should come from.
			 */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}
691 1.1 martin
/*
 * MII status-change callback: reprogram the MAC configuration and
 * flow-control registers to match the speed, duplex and pause
 * settings the PHY negotiated.
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	/* clear the speed/duplex bits before re-deriving them below */
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		/* gigabit: neither MIISEL nor FES100 */
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		/* pause time for generated PAUSE frames */
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}
746 1.1 martin
747 1.1 martin static int
748 1.1 martin dwc_gmac_init(struct ifnet *ifp)
749 1.1 martin {
750 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
751 1.28.2.4 skrll
752 1.28.2.4 skrll mutex_enter(sc->sc_lock);
753 1.28.2.4 skrll int ret = dwc_gmac_init_locked(ifp);
754 1.28.2.4 skrll mutex_exit(sc->sc_lock);
755 1.28.2.4 skrll
756 1.28.2.4 skrll return ret;
757 1.28.2.4 skrll }
758 1.28.2.4 skrll
/*
 * Bring the interface up.  Caller holds sc->sc_lock.
 * Stops the engine, programs the DMA bus mode, the RX frame filter,
 * the multicast hash filter and the RX/TX descriptor ring base
 * addresses, then starts the DMA RX/TX machinery and marks the
 * interface running.
 */
759 1.28.2.4 skrll static int
760 1.28.2.4 skrll dwc_gmac_init_locked(struct ifnet *ifp)
761 1.28.2.4 skrll {
762 1.28.2.4 skrll struct dwc_gmac_softc *sc = ifp->if_softc;
763 1.13 jmcneill uint32_t ffilt;
764 1.1 martin
/* Already up: nothing to do. */
765 1.1 martin if (ifp->if_flags & IFF_RUNNING)
766 1.1 martin return 0;
767 1.1 martin
/* Quiesce the hardware before reprogramming it. */
768 1.28.2.5 skrll dwc_gmac_stop_locked(ifp, 0);
769 1.1 martin
770 1.1 martin /*
771 1.11 martin * Configure DMA burst/transfer mode and RX/TX priorities.
772 1.11 martin * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
773 1.11 martin */
774 1.11 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
775 1.25 jmcneill GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
776 1.25 jmcneill __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
777 1.25 jmcneill __SHIFTIN(2, GMAC_BUSMODE_PBL));
778 1.11 martin
779 1.11 martin /*
780 1.13 jmcneill * Set up address filter
781 1.11 martin */
/* PR = promiscuous, DBF = disable broadcast frames. */
782 1.20 jmcneill ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
783 1.20 jmcneill if (ifp->if_flags & IFF_PROMISC) {
784 1.13 jmcneill ffilt |= AWIN_GMAC_MAC_FFILT_PR;
785 1.20 jmcneill } else {
786 1.20 jmcneill ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
787 1.20 jmcneill }
788 1.20 jmcneill if (ifp->if_flags & IFF_BROADCAST) {
789 1.20 jmcneill ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
790 1.20 jmcneill } else {
791 1.20 jmcneill ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
792 1.20 jmcneill }
793 1.13 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
794 1.11 martin
795 1.11 martin /*
796 1.20 jmcneill * Set up multicast filter
797 1.20 jmcneill */
798 1.20 jmcneill dwc_gmac_setmulti(sc);
799 1.20 jmcneill
800 1.20 jmcneill /*
801 1.6 martin * Set up dma pointer for RX and TX ring
802 1.1 martin */
803 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
804 1.6 martin sc->sc_rxq.r_physaddr);
805 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
806 1.6 martin sc->sc_txq.t_physaddr);
807 1.6 martin
808 1.6 martin /*
809 1.10 martin * Start RX/TX part
810 1.6 martin */
811 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
812 1.11 martin AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
813 1.25 jmcneill GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);
814 1.1 martin
/* Let dwc_gmac_intr()/dwc_gmac_start() proceed again. */
815 1.28.2.4 skrll sc->sc_stopping = false;
816 1.28.2.4 skrll
817 1.1 martin ifp->if_flags |= IFF_RUNNING;
818 1.1 martin ifp->if_flags &= ~IFF_OACTIVE;
819 1.1 martin
820 1.1 martin return 0;
821 1.1 martin }
822 1.1 martin
823 1.1 martin static void
824 1.1 martin dwc_gmac_start(struct ifnet *ifp)
825 1.1 martin {
826 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
827 1.28.2.4 skrll
828 1.28.2.4 skrll mutex_enter(sc->sc_lock);
829 1.28.2.4 skrll if (!sc->sc_stopping) {
830 1.28.2.4 skrll mutex_enter(&sc->sc_txq.t_mtx);
831 1.28.2.4 skrll dwc_gmac_start_locked(ifp);
832 1.28.2.4 skrll mutex_exit(&sc->sc_txq.t_mtx);
833 1.28.2.4 skrll }
834 1.28.2.4 skrll mutex_exit(sc->sc_lock);
835 1.28.2.4 skrll }
836 1.28.2.4 skrll
/*
 * Transmit path.  Caller holds sc->sc_lock and sc->sc_txq.t_mtx.
 * Drain if_snd into the TX descriptor ring via dwc_gmac_queue() and,
 * if anything was enqueued, sync the touched descriptors and poke the
 * DMA engine's TX poll demand register.
 */
837 1.28.2.4 skrll static void
838 1.28.2.4 skrll dwc_gmac_start_locked(struct ifnet *ifp)
839 1.28.2.4 skrll {
840 1.28.2.4 skrll struct dwc_gmac_softc *sc = ifp->if_softc;
841 1.1 martin int old = sc->sc_txq.t_queued;
842 1.28.2.1 skrll int start = sc->sc_txq.t_cur;
843 1.1 martin struct mbuf *m0;
844 1.1 martin
/* Only run when up and not already flow-controlled. */
845 1.1 martin if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
846 1.1 martin return;
847 1.1 martin
848 1.1 martin for (;;) {
849 1.1 martin IFQ_POLL(&ifp->if_snd, m0);
850 1.1 martin if (m0 == NULL)
851 1.1 martin break;
/* No ring space: leave the mbuf on if_snd; tx interrupt clears OACTIVE. */
852 1.1 martin if (dwc_gmac_queue(sc, m0) != 0) {
853 1.1 martin ifp->if_flags |= IFF_OACTIVE;
854 1.1 martin break;
855 1.1 martin }
856 1.1 martin IFQ_DEQUEUE(&ifp->if_snd, m0);
857 1.1 martin bpf_mtap(ifp, m0);
858 1.28.2.1 skrll if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
859 1.28.2.1 skrll ifp->if_flags |= IFF_OACTIVE;
860 1.28.2.1 skrll break;
861 1.28.2.1 skrll }
862 1.1 martin }
863 1.1 martin
864 1.1 martin if (sc->sc_txq.t_queued != old) {
865 1.1 martin /* packets have been queued, kick it off */
866 1.28.2.1 skrll dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
867 1.1 martin BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
868 1.10 martin
/* Any write to TXPOLL makes the DMA engine rescan the ring. */
869 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
870 1.10 martin AWIN_GMAC_DMA_TXPOLL, ~0U);
871 1.10 martin #ifdef DWC_GMAC_DEBUG
872 1.10 martin dwc_dump_status(sc);
873 1.10 martin #endif
874 1.1 martin }
875 1.1 martin }
876 1.1 martin
877 1.1 martin static void
878 1.1 martin dwc_gmac_stop(struct ifnet *ifp, int disable)
879 1.1 martin {
880 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
881 1.1 martin
882 1.28.2.4 skrll mutex_enter(sc->sc_lock);
883 1.28.2.4 skrll dwc_gmac_stop_locked(ifp, disable);
884 1.28.2.4 skrll mutex_exit(sc->sc_lock);
885 1.28.2.4 skrll }
886 1.28.2.4 skrll
/*
 * Stop the interface.  Caller holds sc->sc_lock.
 * Flags the driver as stopping (so the interrupt handler and start
 * path bail out), halts the DMA RX/TX engines, flushes the TX FIFO,
 * takes the MII link down and resets both descriptor rings.
 */
887 1.28.2.4 skrll static void
888 1.28.2.4 skrll dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
889 1.28.2.4 skrll {
890 1.28.2.4 skrll struct dwc_gmac_softc *sc = ifp->if_softc;
891 1.28.2.4 skrll
892 1.28.2.4 skrll sc->sc_stopping = true;
893 1.28.2.4 skrll
/* Clear the RX/TX start bits in the DMA operation mode register. */
894 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
895 1.6 martin AWIN_GMAC_DMA_OPMODE,
896 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
897 1.6 martin AWIN_GMAC_DMA_OPMODE)
898 1.6 martin & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
/* Then request a TX FIFO flush. */
899 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
900 1.6 martin AWIN_GMAC_DMA_OPMODE,
901 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
902 1.6 martin AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
903 1.6 martin
904 1.1 martin mii_down(&sc->sc_mii);
905 1.1 martin dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
906 1.1 martin dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
907 1.1 martin }
908 1.1 martin
909 1.1 martin /*
910 1.1 martin * Add m0 to the TX ring
911 1.1 martin */
/*
 * Map mbuf chain m0 for DMA and fill one TX descriptor per segment.
 * Returns 0 on success; on failure (map error or insufficient ring
 * space) the mbuf is left untouched for the caller to retry.
 * Ownership of the first descriptor is handed to the device last so
 * the engine never sees a partially built chain.
 */
912 1.1 martin static int
913 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
914 1.1 martin {
915 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
916 1.1 martin struct dwc_gmac_tx_data *data = NULL;
917 1.1 martin bus_dmamap_t map;
918 1.28.2.1 skrll uint32_t flags, len, status;
919 1.1 martin int error, i, first;
920 1.1 martin
921 1.8 martin #ifdef DWC_GMAC_DEBUG
922 1.8 martin aprint_normal_dev(sc->sc_dev,
923 1.8 martin "dwc_gmac_queue: adding mbuf chain %p\n", m0);
924 1.8 martin #endif
925 1.8 martin
926 1.1 martin first = sc->sc_txq.t_cur;
927 1.1 martin map = sc->sc_txq.t_data[first].td_map;
928 1.1 martin
929 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
930 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
931 1.1 martin if (error != 0) {
932 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
933 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
934 1.1 martin return error;
935 1.1 martin }
936 1.1 martin
/* Not enough free descriptors for all segments: undo the mapping. */
937 1.28.2.1 skrll if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
938 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
939 1.1 martin return ENOBUFS;
940 1.1 martin }
941 1.1 martin
942 1.8 martin flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
943 1.28.2.1 skrll status = 0;
944 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
945 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
946 1.8 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
947 1.8 martin
948 1.8 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
949 1.28.2.1 skrll len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
950 1.7 martin
951 1.7 martin #ifdef DWC_GMAC_DEBUG
952 1.7 martin aprint_normal_dev(sc->sc_dev, "enqueing desc #%d data %08lx "
953 1.8 martin "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
954 1.7 martin (unsigned long)map->dm_segs[i].ds_addr,
955 1.8 martin (unsigned long)map->dm_segs[i].ds_len,
956 1.8 martin flags, len);
957 1.7 martin #endif
958 1.7 martin
959 1.6 martin desc->ddesc_cntl = htole32(len|flags);
/* TXFIRST applies only to the first segment. */
960 1.6 martin flags &= ~DDESC_CNTL_TXFIRST;
961 1.1 martin
962 1.1 martin /*
963 1.1 martin * Defer passing ownership of the first descriptor
964 1.23 joerg * until we are done.
965 1.1 martin */
/* status is 0 on the first pass, OWNEDBYDEV on all later ones. */
966 1.28.2.1 skrll desc->ddesc_status = htole32(status);
967 1.28.2.1 skrll status |= DDESC_STATUS_OWNEDBYDEV;
968 1.8 martin
969 1.6 martin sc->sc_txq.t_queued++;
970 1.8 martin sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
971 1.1 martin }
972 1.1 martin
/* Mark the last segment and request a TX completion interrupt. */
973 1.28.2.1 skrll desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);
974 1.1 martin
975 1.1 martin data->td_m = m0;
976 1.1 martin data->td_active = map;
977 1.1 martin
978 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
979 1.28.2.2 skrll BUS_DMASYNC_PREWRITE);
980 1.1 martin
981 1.28.2.1 skrll /* Pass first to device */
982 1.28.2.1 skrll sc->sc_txq.t_desc[first].ddesc_status =
983 1.28.2.1 skrll htole32(DDESC_STATUS_OWNEDBYDEV);
984 1.28.2.1 skrll
985 1.1 martin return 0;
986 1.1 martin }
987 1.1 martin
988 1.22 martin /*
989 1.22 martin * If the interface is up and running, only modify the receive
990 1.22 martin * filter when setting promiscuous or debug mode. Otherwise fall
991 1.22 martin * through to ether_ioctl, which will reset the chip.
992 1.22 martin */
993 1.22 martin static int
994 1.22 martin dwc_gmac_ifflags_cb(struct ethercom *ec)
995 1.22 martin {
996 1.22 martin struct ifnet *ifp = &ec->ec_if;
997 1.22 martin struct dwc_gmac_softc *sc = ifp->if_softc;
998 1.22 martin int change = ifp->if_flags ^ sc->sc_if_flags;
999 1.22 martin
1000 1.22 martin if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
1001 1.22 martin return ENETRESET;
1002 1.28.2.4 skrll if ((change & IFF_PROMISC) != 0) {
1003 1.28.2.4 skrll mutex_enter(sc->sc_lock);
1004 1.22 martin dwc_gmac_setmulti(sc);
1005 1.28.2.4 skrll mutex_exit(sc->sc_lock);
1006 1.28.2.4 skrll }
1007 1.22 martin return 0;
1008 1.22 martin }
1009 1.22 martin
/*
 * if_ioctl handler.  Defers to ether_ioctl() and, on ENETRESET for
 * multicast list changes while running, re-programs the hardware
 * filter instead of resetting the chip.  With DWCGMAC_MPSAFE the spl
 * protection is dropped right after ether_ioctl(); otherwise it is
 * held until the end, matching the pre-MPSAFE behaviour.
 */
1010 1.1 martin static int
1011 1.1 martin dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1012 1.1 martin {
1013 1.20 jmcneill struct dwc_gmac_softc *sc = ifp->if_softc;
1014 1.28.2.4 skrll int error = 0;
1015 1.1 martin
1016 1.28.2.4 skrll int s = splnet();
1017 1.28.2.4 skrll error = ether_ioctl(ifp, cmd, data);
1018 1.1 martin
1019 1.28.2.4 skrll #ifdef DWCGMAC_MPSAFE
1020 1.28.2.4 skrll splx(s);
1021 1.28.2.4 skrll #endif
1022 1.28.2.4 skrll
1023 1.28.2.4 skrll if (error == ENETRESET) {
1024 1.1 martin error = 0;
/* Only multicast changes are handled here; others need no action. */
1025 1.1 martin if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1026 1.1 martin ;
1027 1.22 martin else if (ifp->if_flags & IFF_RUNNING) {
1028 1.22 martin /*
1029 1.22 martin * Multicast list has changed; set the hardware filter
1030 1.22 martin * accordingly.
1031 1.22 martin */
1032 1.28.2.4 skrll mutex_enter(sc->sc_lock);
1033 1.20 jmcneill dwc_gmac_setmulti(sc);
1034 1.28.2.4 skrll mutex_exit(sc->sc_lock);
1035 1.22 martin }
1036 1.1 martin }
1037 1.1 martin
1038 1.22 martin /* Try to get things going again */
1039 1.22 martin if (ifp->if_flags & IFF_UP)
1040 1.22 martin dwc_gmac_start(ifp);
/* Remember the flags so ifflags_cb can detect changes. */
1041 1.22 martin sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1042 1.28.2.4 skrll
1043 1.28.2.4 skrll #ifndef DWCGMAC_MPSAFE
1044 1.1 martin splx(s);
1045 1.28.2.4 skrll #endif
1046 1.28.2.4 skrll
1047 1.1 martin return error;
1048 1.1 martin }
1049 1.1 martin
/*
 * TX completion interrupt: walk the ring from t_next, reclaiming
 * descriptors the device has released (OWNEDBYDEV cleared), unloading
 * their DMA maps and freeing the transmitted mbufs.  Clears OACTIVE
 * when ring space becomes available again.
 */
1050 1.8 martin static void
1051 1.8 martin dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
1052 1.8 martin {
1053 1.28.2.1 skrll struct ifnet *ifp = &sc->sc_ec.ec_if;
1054 1.8 martin struct dwc_gmac_tx_data *data;
1055 1.8 martin struct dwc_gmac_dev_dmadesc *desc;
1056 1.28.2.1 skrll uint32_t status;
1057 1.28.2.1 skrll int i, nsegs;
1058 1.8 martin
1059 1.28.2.4 skrll mutex_enter(&sc->sc_txq.t_mtx);
1060 1.28.2.4 skrll
1061 1.28.2.1 skrll for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
1062 1.8 martin #ifdef DWC_GMAC_DEBUG
1063 1.8 martin aprint_normal_dev(sc->sc_dev,
1064 1.8 martin "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
1065 1.8 martin i, sc->sc_txq.t_queued);
1066 1.8 martin #endif
1067 1.8 martin
1068 1.26 martin /*
1069 1.26 martin * i+1 does not need to be a valid descriptor,
1070 1.26 martin * this is just a special notion to just sync
1071 1.26 martin * a single tx descriptor (i)
1072 1.26 martin */
1073 1.26 martin dwc_gmac_txdesc_sync(sc, i, i+1,
1074 1.8 martin BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1075 1.11 martin
/* Stop at the first descriptor the device still owns. */
1076 1.28.2.1 skrll desc = &sc->sc_txq.t_desc[i];
1077 1.28.2.1 skrll status = le32toh(desc->ddesc_status);
1078 1.28.2.1 skrll if (status & DDESC_STATUS_OWNEDBYDEV)
1079 1.8 martin break;
1080 1.11 martin
/* Only the last descriptor of a chain carries the mbuf. */
1081 1.8 martin data = &sc->sc_txq.t_data[i];
1082 1.8 martin if (data->td_m == NULL)
1083 1.8 martin continue;
1084 1.28.2.1 skrll
1085 1.28.2.1 skrll ifp->if_opackets++;
/* Remember segment count before the map is unloaded. */
1086 1.28.2.1 skrll nsegs = data->td_active->dm_nsegs;
1087 1.8 martin bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
1088 1.8 martin data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1089 1.8 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
1090 1.8 martin
1091 1.8 martin #ifdef DWC_GMAC_DEBUG
1092 1.8 martin aprint_normal_dev(sc->sc_dev,
1093 1.8 martin "dwc_gmac_tx_intr: done with packet at desc #%d, "
1094 1.8 martin "freeing mbuf %p\n", i, data->td_m);
1095 1.8 martin #endif
1096 1.8 martin
1097 1.8 martin m_freem(data->td_m);
1098 1.8 martin data->td_m = NULL;
1099 1.28.2.1 skrll
1100 1.28.2.1 skrll sc->sc_txq.t_queued -= nsegs;
1101 1.8 martin }
1102 1.8 martin
1103 1.8 martin sc->sc_txq.t_next = i;
1104 1.8 martin
1105 1.8 martin if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
1106 1.28.2.1 skrll ifp->if_flags &= ~IFF_OACTIVE;
1107 1.8 martin }
1108 1.28.2.4 skrll mutex_exit(&sc->sc_txq.t_mtx);
1109 1.8 martin }
1110 1.8 martin
/*
 * RX interrupt: walk the ring from r_cur, passing completed frames up
 * the stack.  Each harvested slot gets a freshly allocated mbuf
 * cluster; if allocation or the new DMA load fails, the frame is
 * dropped and the old mbuf is re-used, so the ring is never left with
 * an unpopulated slot.  The descriptor is always handed back to the
 * device at the end of each iteration ("skip" path included).
 */
1111 1.8 martin static void
1112 1.8 martin dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
1113 1.8 martin {
1114 1.11 martin struct ifnet *ifp = &sc->sc_ec.ec_if;
1115 1.11 martin struct dwc_gmac_dev_dmadesc *desc;
1116 1.11 martin struct dwc_gmac_rx_data *data;
1117 1.11 martin bus_addr_t physaddr;
1118 1.11 martin uint32_t status;
1119 1.11 martin struct mbuf *m, *mnew;
1120 1.11 martin int i, len, error;
1121 1.11 martin
1122 1.28.2.4 skrll mutex_enter(&sc->sc_rxq.r_mtx);
1123 1.11 martin for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
1124 1.11 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1125 1.11 martin RX_DESC_OFFSET(i), sizeof(*desc),
1126 1.11 martin BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1127 1.11 martin desc = &sc->sc_rxq.r_desc[i];
1128 1.11 martin data = &sc->sc_rxq.r_data[i];
1129 1.11 martin
/* Device still owns this descriptor: no more completed frames. */
1130 1.11 martin status = le32toh(desc->ddesc_status);
1131 1.15 martin if (status & DDESC_STATUS_OWNEDBYDEV)
1132 1.11 martin break;
1133 1.11 martin
1134 1.11 martin if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
1135 1.11 martin #ifdef DWC_GMAC_DEBUG
1136 1.15 martin aprint_normal_dev(sc->sc_dev,
1137 1.15 martin "RX error: descriptor status %08x, skipping\n",
1138 1.15 martin status);
1139 1.11 martin #endif
1140 1.11 martin ifp->if_ierrors++;
1141 1.11 martin goto skip;
1142 1.11 martin }
1143 1.11 martin
1144 1.11 martin len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);
1145 1.11 martin
1146 1.11 martin #ifdef DWC_GMAC_DEBUG
1147 1.15 martin aprint_normal_dev(sc->sc_dev,
1148 1.15 martin "rx int: device is done with descriptor #%d, len: %d\n",
1149 1.15 martin i, len);
1150 1.11 martin #endif
1151 1.11 martin
1152 1.11 martin /*
1153 1.11 martin * Try to get a new mbuf before passing this one
1154 1.11 martin * up, if that fails, drop the packet and reuse
1155 1.11 martin * the existing one.
1156 1.11 martin */
1157 1.11 martin MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1158 1.11 martin if (mnew == NULL) {
1159 1.11 martin ifp->if_ierrors++;
1160 1.11 martin goto skip;
1161 1.11 martin }
1162 1.11 martin MCLGET(mnew, M_DONTWAIT);
1163 1.11 martin if ((mnew->m_flags & M_EXT) == 0) {
1164 1.11 martin m_freem(mnew);
1165 1.11 martin ifp->if_ierrors++;
1166 1.11 martin goto skip;
1167 1.11 martin }
1168 1.11 martin
1169 1.11 martin /* unload old DMA map */
1170 1.11 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1171 1.11 martin data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1172 1.11 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
1173 1.11 martin
1174 1.11 martin /* and reload with new mbuf */
1175 1.11 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
1176 1.11 martin mtod(mnew, void*), MCLBYTES, NULL,
1177 1.11 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
1178 1.28.2.4 skrll if (error != 0) {
1179 1.11 martin m_freem(mnew);
1180 1.11 martin /* try to reload old mbuf */
1181 1.11 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
1182 1.11 martin mtod(data->rd_m, void*), MCLBYTES, NULL,
1183 1.11 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
1184 1.11 martin if (error != 0) {
/* Ring slot would be left unmapped; cannot continue. */
1185 1.11 martin panic("%s: could not load old rx mbuf",
1186 1.11 martin device_xname(sc->sc_dev));
1187 1.11 martin }
1188 1.11 martin ifp->if_ierrors++;
1189 1.11 martin goto skip;
1190 1.11 martin }
1191 1.11 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
1192 1.11 martin
1193 1.11 martin /*
1194 1.11 martin * New mbuf loaded, update RX ring and continue
1195 1.11 martin */
1196 1.11 martin m = data->rd_m;
1197 1.11 martin data->rd_m = mnew;
1198 1.11 martin desc->ddesc_data = htole32(physaddr);
1199 1.11 martin
1200 1.11 martin /* finalize mbuf */
1201 1.11 martin m->m_pkthdr.len = m->m_len = len;
1202 1.11 martin m->m_pkthdr.rcvif = ifp;
1203 1.19 matt m->m_flags |= M_HASFCS;
1204 1.11 martin
1205 1.11 martin ifp->if_ipackets++;
1206 1.28.2.4 skrll
/* Drop the ring mutex while handing the frame to the stack. */
1207 1.28.2.4 skrll mutex_exit(&sc->sc_rxq.r_mtx);
1208 1.28.2.4 skrll
1209 1.28.2.4 skrll bpf_mtap(ifp, m);
1210 1.28.2.3 skrll if_percpuq_enqueue(ifp->if_percpuq, m);
1211 1.11 martin
1212 1.28.2.4 skrll mutex_enter(&sc->sc_rxq.r_mtx);
1213 1.28.2.4 skrll
1214 1.11 martin skip:
/* Re-arm this slot and give it back to the device. */
1215 1.27 matt bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1216 1.27 matt data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1217 1.11 martin desc->ddesc_cntl = htole32(
1218 1.16 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
1219 1.16 martin DDESC_CNTL_RXCHAIN);
1220 1.11 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
1221 1.11 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1222 1.11 martin RX_DESC_OFFSET(i), sizeof(*desc),
1223 1.11 martin BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1224 1.11 martin }
1225 1.11 martin
1226 1.11 martin /* update RX pointer */
1227 1.11 martin sc->sc_rxq.r_cur = i;
1228 1.11 martin
1229 1.28.2.4 skrll mutex_exit(&sc->sc_rxq.r_mtx);
1230 1.8 martin }
1231 1.8 martin
1232 1.22 martin /*
1233 1.24 skrll * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
1234 1.22 martin */
/*
 * Reverse the bit order of a 32-bit word by swapping progressively
 * larger groups: single bits, pairs, nibbles, bytes, then halves.
 */
static uint32_t
bitrev32(uint32_t x)
{
	uint32_t v = x;

	v = ((v >> 1) & 0x55555555) | ((v & 0x55555555) << 1);
	v = ((v >> 2) & 0x33333333) | ((v & 0x33333333) << 2);
	v = ((v >> 4) & 0x0f0f0f0f) | ((v & 0x0f0f0f0f) << 4);
	v = ((v >> 8) & 0x00ff00ff) | ((v & 0x00ff00ff) << 8);

	return (v >> 16) | (v << 16);
}
1245 1.22 martin
/*
 * Program the hardware RX filter.  Caller holds sc->sc_lock.
 * Promiscuous mode or a multicast address range forces the catch-all
 * path (special_filter: hash tables all-ones plus PR/PM); otherwise
 * each multicast address is hashed into the 64-bit HTLOW/HTHIGH table
 * using the top 6 bits of the bit-reversed inverted CRC32.
 */
1246 1.20 jmcneill static void
1247 1.20 jmcneill dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1248 1.20 jmcneill {
1249 1.20 jmcneill struct ifnet * const ifp = &sc->sc_ec.ec_if;
1250 1.20 jmcneill struct ether_multi *enm;
1251 1.20 jmcneill struct ether_multistep step;
1252 1.20 jmcneill uint32_t hashes[2] = { 0, 0 };
1253 1.22 martin uint32_t ffilt, h;
1254 1.28.2.4 skrll int mcnt;
1255 1.22 martin
1256 1.28.2.4 skrll KASSERT(mutex_owned(sc->sc_lock));
1257 1.20 jmcneill
1258 1.20 jmcneill ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1259 1.28.2.2 skrll
1260 1.20 jmcneill if (ifp->if_flags & IFF_PROMISC) {
1261 1.22 martin ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1262 1.22 martin goto special_filter;
1263 1.20 jmcneill }
1264 1.20 jmcneill
1265 1.20 jmcneill ifp->if_flags &= ~IFF_ALLMULTI;
1266 1.22 martin ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);
1267 1.20 jmcneill
/* Start from an empty hash table. */
1268 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1269 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1270 1.20 jmcneill
1271 1.20 jmcneill ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
1272 1.20 jmcneill mcnt = 0;
1273 1.20 jmcneill while (enm != NULL) {
/* Address ranges cannot be hashed: fall back to ALLMULTI. */
1274 1.20 jmcneill if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1275 1.22 martin ETHER_ADDR_LEN) != 0) {
1276 1.22 martin ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1277 1.22 martin ifp->if_flags |= IFF_ALLMULTI;
1278 1.22 martin goto special_filter;
1279 1.22 martin }
1280 1.20 jmcneill
/* Hash = top 6 bits of bit-reversed ~CRC32 of the address. */
1281 1.22 martin h = bitrev32(
1282 1.22 martin ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
1283 1.22 martin ) >> 26;
1284 1.20 jmcneill hashes[h >> 5] |= (1 << (h & 0x1f));
1285 1.20 jmcneill
1286 1.20 jmcneill mcnt++;
1287 1.20 jmcneill ETHER_NEXT_MULTI(step, enm);
1288 1.20 jmcneill }
1289 1.20 jmcneill
1290 1.20 jmcneill if (mcnt)
1291 1.20 jmcneill ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1292 1.20 jmcneill else
1293 1.20 jmcneill ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1294 1.20 jmcneill
1295 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1296 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1297 1.20 jmcneill hashes[0]);
1298 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1299 1.20 jmcneill hashes[1]);
1300 1.22 martin sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1301 1.22 martin
1302 1.22 martin #ifdef DWC_GMAC_DEBUG
1303 1.22 martin dwc_gmac_dump_ffilt(sc, ffilt);
1304 1.22 martin #endif
1305 1.22 martin return;
1306 1.22 martin
1307 1.22 martin special_filter:
1308 1.22 martin #ifdef DWC_GMAC_DEBUG
1309 1.22 martin dwc_gmac_dump_ffilt(sc, ffilt);
1310 1.22 martin #endif
1311 1.22 martin /* no MAC hashes, ALLMULTI or PROMISC */
1312 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1313 1.22 martin ffilt);
1314 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1315 1.22 martin 0xffffffff);
1316 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1317 1.22 martin 0xffffffff);
1318 1.22 martin sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1319 1.20 jmcneill }
1320 1.20 jmcneill
/*
 * Main interrupt handler.  Returns non-zero when the interrupt was
 * ours.  Dispatches MII status changes, RX/TX completion and error
 * conditions, acks the DMA status bits, and retries the transmit path
 * if anything was handled.
 */
1321 1.1 martin int
1322 1.1 martin dwc_gmac_intr(struct dwc_gmac_softc *sc)
1323 1.1 martin {
1324 1.1 martin uint32_t status, dma_status;
1325 1.8 martin int rv = 0;
1326 1.1 martin
/* Interface is going down: ignore (possibly stale) interrupts. */
1327 1.28.2.4 skrll if (sc->sc_stopping)
1328 1.28.2.4 skrll return 0;
1329 1.28.2.4 skrll
1330 1.1 martin status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1331 1.2 martin if (status & AWIN_GMAC_MII_IRQ) {
/* Reading MII_STATUS acknowledges the MII interrupt. */
1332 1.1 martin (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1333 1.1 martin AWIN_GMAC_MII_STATUS);
1334 1.8 martin rv = 1;
1335 1.2 martin mii_pollstat(&sc->sc_mii);
1336 1.2 martin }
1337 1.1 martin
1338 1.1 martin dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1339 1.1 martin AWIN_GMAC_DMA_STATUS);
1340 1.1 martin
1341 1.8 martin if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
1342 1.8 martin rv = 1;
1343 1.1 martin
1344 1.8 martin if (dma_status & GMAC_DMA_INT_TIE)
1345 1.8 martin dwc_gmac_tx_intr(sc);
1346 1.1 martin
1347 1.8 martin if (dma_status & GMAC_DMA_INT_RIE)
1348 1.8 martin dwc_gmac_rx_intr(sc);
1349 1.8 martin
1350 1.8 martin /*
1351 1.8 martin * Check error conditions
1352 1.8 martin */
1353 1.8 martin if (dma_status & GMAC_DMA_INT_ERRORS) {
1354 1.8 martin sc->sc_ec.ec_if.if_oerrors++;
1355 1.8 martin #ifdef DWC_GMAC_DEBUG
1356 1.8 martin dwc_dump_and_abort(sc, "interrupt error condition");
1357 1.8 martin #endif
1358 1.8 martin }
1359 1.8 martin
1360 1.8 martin /* ack interrupt */
1361 1.8 martin if (dma_status)
1362 1.8 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1363 1.8 martin AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1364 1.8 martin
1365 1.28 martin /*
1366 1.28 martin * Get more packets
1367 1.28 martin */
1368 1.28 martin if (rv)
1369 1.28 martin sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);
1370 1.28 martin
1371 1.8 martin return rv;
1372 1.1 martin }
1373 1.7 martin
1374 1.7 martin #ifdef DWC_GMAC_DEBUG
1375 1.7 martin static void
1376 1.7 martin dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1377 1.7 martin {
1378 1.7 martin aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1379 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1380 1.7 martin aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1381 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1382 1.7 martin aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1383 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1384 1.7 martin aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1385 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1386 1.7 martin aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1387 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1388 1.7 martin aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1389 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1390 1.7 martin aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1391 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1392 1.7 martin aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1393 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1394 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1395 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1396 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1397 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1398 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1399 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1400 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1401 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1402 1.7 martin }
1403 1.7 martin
1404 1.7 martin static void
1405 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1406 1.7 martin {
1407 1.7 martin int i;
1408 1.7 martin
1409 1.8 martin aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1410 1.8 martin sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1411 1.8 martin aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1412 1.7 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1413 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1414 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1415 1.15 martin "data: %08x next: %08x\n",
1416 1.15 martin i, sc->sc_txq.t_physaddr +
1417 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1418 1.7 martin le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1419 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1420 1.7 martin }
1421 1.7 martin }
1422 1.8 martin
1423 1.8 martin static void
1424 1.11 martin dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1425 1.11 martin {
1426 1.11 martin int i;
1427 1.11 martin
1428 1.11 martin aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1429 1.11 martin sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1430 1.11 martin aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1431 1.11 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1432 1.11 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1433 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1434 1.15 martin "data: %08x next: %08x\n",
1435 1.15 martin i, sc->sc_rxq.r_physaddr +
1436 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1437 1.11 martin le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1438 1.11 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1439 1.11 martin }
1440 1.11 martin }
1441 1.11 martin
/*
 * Debug helper: print the MAC interrupt status register and a decoded
 * (snprintb) rendering of the DMA status register.
 */
1442 1.11 martin static void
1443 1.10 martin dwc_dump_status(struct dwc_gmac_softc *sc)
1444 1.8 martin {
1445 1.8 martin uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1446 1.8 martin AWIN_GMAC_MAC_INTR);
1447 1.8 martin uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1448 1.8 martin AWIN_GMAC_DMA_STATUS);
1449 1.8 martin char buf[200];
1450 1.8 martin
1451 1.8 martin /* print interrupt state */
/* snprintb bit map: each "b\xNN" names one DMA status bit. */
1452 1.8 martin snprintb(buf, sizeof(buf), "\177\20"
1453 1.10 martin "b\x10""NI\0"
1454 1.10 martin "b\x0f""AI\0"
1455 1.10 martin "b\x0e""ER\0"
1456 1.10 martin "b\x0d""FB\0"
1457 1.10 martin "b\x0a""ET\0"
1458 1.10 martin "b\x09""RW\0"
1459 1.10 martin "b\x08""RS\0"
1460 1.10 martin "b\x07""RU\0"
1461 1.10 martin "b\x06""RI\0"
1462 1.10 martin "b\x05""UN\0"
1463 1.10 martin "b\x04""OV\0"
1464 1.10 martin "b\x03""TJ\0"
1465 1.10 martin "b\x02""TU\0"
1466 1.10 martin "b\x01""TS\0"
1467 1.10 martin "b\x00""TI\0"
1468 1.8 martin "\0", dma_status);
1469 1.10 martin aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1470 1.8 martin status, buf);
1471 1.10 martin }
1472 1.8 martin
/*
 * Debug helper: dump all device state (status, frame filter, DMA
 * registers, both descriptor rings) and then panic with "msg".
 * Never returns.
 */
1473 1.10 martin static void
1474 1.10 martin dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1475 1.10 martin {
1476 1.10 martin dwc_dump_status(sc);
1477 1.22 martin dwc_gmac_dump_ffilt(sc,
1478 1.22 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1479 1.8 martin dwc_gmac_dump_dma(sc);
1480 1.8 martin dwc_gmac_dump_tx_desc(sc);
1481 1.11 martin dwc_gmac_dump_rx_desc(sc);
1482 1.8 martin
/* "%s" avoids treating msg as a format string. */
1483 1.21 joerg panic("%s", msg);
1484 1.8 martin }
1485 1.22 martin
/*
 * Debug helper: print a decoded (snprintb) rendering of the given
 * frame filter register value.
 */
1486 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1487 1.22 martin {
1488 1.22 martin char buf[200];
1489 1.22 martin
1490 1.22 martin /* print filter setup */
/* snprintb bit map: each "b\xNN" names one frame filter bit. */
1491 1.22 martin snprintb(buf, sizeof(buf), "\177\20"
1492 1.22 martin "b\x1f""RA\0"
1493 1.22 martin "b\x0a""HPF\0"
1494 1.22 martin "b\x09""SAF\0"
1495 1.22 martin "b\x08""SAIF\0"
1496 1.22 martin "b\x05""DBF\0"
1497 1.22 martin "b\x04""PM\0"
1498 1.22 martin "b\x03""DAIF\0"
1499 1.22 martin "b\x02""HMC\0"
1500 1.22 martin "b\x01""HUC\0"
1501 1.22 martin "b\x00""PR\0"
1502 1.22 martin "\0", ffilt);
1503 1.22 martin aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1504 1.22 martin }
1505 1.7 martin #endif
1506