dwc_gmac.c revision 1.23 1 1.23 joerg /* $NetBSD: dwc_gmac.c,v 1.23 2014/10/26 20:51:54 joerg Exp $ */
2 1.18 jmcneill
3 1.1 martin /*-
4 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5 1.1 martin * All rights reserved.
6 1.1 martin *
7 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
8 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9 1.1 martin *
10 1.1 martin * Redistribution and use in source and binary forms, with or without
11 1.1 martin * modification, are permitted provided that the following conditions
12 1.1 martin * are met:
13 1.1 martin * 1. Redistributions of source code must retain the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer.
15 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 martin * notice, this list of conditions and the following disclaimer in the
17 1.1 martin * documentation and/or other materials provided with the distribution.
18 1.1 martin *
19 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
30 1.1 martin */
31 1.1 martin
32 1.1 martin /*
33 1.1 martin  * This driver supports the Synopsys DesignWare GMAC core, as found
34 1.1 martin * on Allwinner A20 cores and others.
35 1.1 martin *
36 1.1 martin * Real documentation seems to not be available, the marketing product
37 1.1 martin * documents could be found here:
38 1.1 martin *
39 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40 1.1 martin */
41 1.1 martin
42 1.1 martin #include <sys/cdefs.h>
43 1.1 martin
44 1.23 joerg __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.23 2014/10/26 20:51:54 joerg Exp $");
45 1.7 martin
46 1.7 martin /* #define DWC_GMAC_DEBUG 1 */
47 1.1 martin
48 1.1 martin #include "opt_inet.h"
49 1.1 martin
50 1.1 martin #include <sys/param.h>
51 1.1 martin #include <sys/bus.h>
52 1.1 martin #include <sys/device.h>
53 1.1 martin #include <sys/intr.h>
54 1.1 martin #include <sys/systm.h>
55 1.1 martin #include <sys/sockio.h>
56 1.1 martin
57 1.1 martin #include <net/if.h>
58 1.1 martin #include <net/if_ether.h>
59 1.1 martin #include <net/if_media.h>
60 1.1 martin #include <net/bpf.h>
61 1.1 martin #ifdef INET
62 1.1 martin #include <netinet/if_inarp.h>
63 1.1 martin #endif
64 1.1 martin
65 1.1 martin #include <dev/mii/miivar.h>
66 1.1 martin
67 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
68 1.1 martin #include <dev/ic/dwc_gmac_var.h>
69 1.1 martin
70 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
71 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
72 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
73 1.1 martin
74 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
75 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
76 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
77 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
78 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
79 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
80 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
81 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
82 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
83 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
84 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
85 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
86 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
87 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
88 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
89 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
90 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
91 1.8 martin static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
92 1.8 martin static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
93 1.20 jmcneill static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
94 1.22 martin static int dwc_gmac_ifflags_cb(struct ethercom *);
95 1.22 martin static uint32_t bitrev32(uint32_t x);
96 1.1 martin
97 1.1 martin #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
98 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc))
99 1.8 martin #define TX_NEXT(N) (((N)+1) & (AWGE_TX_RING_COUNT-1))
100 1.1 martin
101 1.1 martin #define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
102 1.8 martin #define RX_NEXT(N) (((N)+1) & (AWGE_RX_RING_COUNT-1))
103 1.8 martin
104 1.8 martin
105 1.8 martin
106 1.11 martin #define GMAC_DEF_DMA_INT_MASK (GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
107 1.8 martin GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
108 1.8 martin GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)
109 1.8 martin
110 1.8 martin #define GMAC_DMA_INT_ERRORS (GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
111 1.10 martin GMAC_DMA_INT_FBE| \
112 1.8 martin GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
113 1.8 martin GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
114 1.10 martin GMAC_DMA_INT_TJE)
115 1.8 martin
116 1.8 martin #define AWIN_DEF_MAC_INTRMASK \
117 1.8 martin (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
118 1.8 martin AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
119 1.1 martin
120 1.7 martin
121 1.7 martin #ifdef DWC_GMAC_DEBUG
122 1.7 martin static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
123 1.7 martin static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
124 1.11 martin static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
125 1.8 martin static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
126 1.10 martin static void dwc_dump_status(struct dwc_gmac_softc *sc);
127 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
128 1.7 martin #endif
129 1.7 martin
/*
 * Attach the GMAC core: establish the MAC address, reset the chip,
 * allocate the DMA descriptor rings, attach the MII/PHY layer and
 * register the network interface.  mii_clk selects the MDIO clock
 * divider (only the low 3 bits are kept).
 */
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		/* all-ones means the filter was never programmed */
		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

		/* address registers hold the MAC in little-endian order */
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* no PHY: fall back to a fixed manual media setting */
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
261 1.1 martin
262 1.1 martin
263 1.1 martin
264 1.1 martin static int
265 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
266 1.1 martin {
267 1.1 martin size_t cnt;
268 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
269 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
270 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
271 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
272 1.1 martin & GMAC_BUSMODE_RESET) == 0)
273 1.1 martin return 0;
274 1.1 martin delay(10);
275 1.1 martin }
276 1.1 martin
277 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
278 1.1 martin return EIO;
279 1.1 martin }
280 1.1 martin
281 1.1 martin static void
282 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
283 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
284 1.1 martin {
285 1.1 martin uint32_t lo, hi;
286 1.1 martin
287 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
288 1.1 martin | (enaddr[3] << 24);
289 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
290 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
291 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
292 1.1 martin }
293 1.1 martin
294 1.1 martin static int
295 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
296 1.1 martin {
297 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
298 1.6 martin uint16_t mii;
299 1.1 martin size_t cnt;
300 1.1 martin int rv = 0;
301 1.1 martin
302 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
303 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
304 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
305 1.6 martin | GMAC_MII_BUSY;
306 1.1 martin
307 1.1 martin mutex_enter(&sc->sc_mdio_lock);
308 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
309 1.1 martin
310 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
311 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
312 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
313 1.3 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
314 1.3 martin AWIN_GMAC_MAC_MIIDATA);
315 1.1 martin break;
316 1.1 martin }
317 1.1 martin delay(10);
318 1.1 martin }
319 1.1 martin
320 1.1 martin mutex_exit(&sc->sc_mdio_lock);
321 1.1 martin
322 1.1 martin return rv;
323 1.1 martin }
324 1.1 martin
325 1.1 martin static void
326 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
327 1.1 martin {
328 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
329 1.6 martin uint16_t mii;
330 1.1 martin size_t cnt;
331 1.1 martin
332 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
333 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
334 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
335 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
336 1.1 martin
337 1.1 martin mutex_enter(&sc->sc_mdio_lock);
338 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
339 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
340 1.1 martin
341 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
342 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
343 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
344 1.1 martin break;
345 1.1 martin delay(10);
346 1.1 martin }
347 1.1 martin
348 1.1 martin mutex_exit(&sc->sc_mdio_lock);
349 1.1 martin }
350 1.1 martin
/*
 * Allocate mbufs and DMA maps for the RX ring, chain the descriptors
 * together and hand all of them to the device.  Returns 0 or an errno;
 * on failure everything allocated so far is released through
 * dwc_gmac_free_rx_ring().
 */
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		/* one map per buffer, single MCLBYTES-sized segment */
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * Point the descriptor at the buffer, chain it to its
		 * successor (wrapping at the end) and hand ownership to
		 * the device.
		 */
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	/* flush the descriptors and tell the device where the ring lives */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}
427 1.1 martin
428 1.1 martin static void
429 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
430 1.1 martin struct dwc_gmac_rx_ring *ring)
431 1.1 martin {
432 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
433 1.1 martin int i;
434 1.1 martin
435 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
436 1.1 martin desc = &sc->sc_rxq.r_desc[i];
437 1.1 martin desc->ddesc_cntl = htole32(
438 1.16 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
439 1.16 martin DDESC_CNTL_RXCHAIN);
440 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
441 1.1 martin }
442 1.1 martin
443 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
444 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
445 1.1 martin BUS_DMASYNC_PREWRITE);
446 1.1 martin
447 1.1 martin ring->r_cur = ring->r_next = 0;
448 1.11 martin /* reset DMA address to start of ring */
449 1.11 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
450 1.11 martin sc->sc_rxq.r_physaddr);
451 1.1 martin }
452 1.1 martin
453 1.1 martin static int
454 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
455 1.1 martin {
456 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
457 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
458 1.1 martin int error, nsegs;
459 1.1 martin void *rings;
460 1.1 martin
461 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
462 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
463 1.1 martin if (error != 0) {
464 1.1 martin aprint_error_dev(sc->sc_dev,
465 1.1 martin "could not create desc DMA map\n");
466 1.1 martin sc->sc_dma_ring_map = NULL;
467 1.1 martin goto fail;
468 1.1 martin }
469 1.1 martin
470 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
471 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
472 1.1 martin if (error != 0) {
473 1.1 martin aprint_error_dev(sc->sc_dev,
474 1.1 martin "could not map DMA memory\n");
475 1.1 martin goto fail;
476 1.1 martin }
477 1.1 martin
478 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
479 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
480 1.1 martin if (error != 0) {
481 1.1 martin aprint_error_dev(sc->sc_dev,
482 1.1 martin "could not allocate DMA memory\n");
483 1.1 martin goto fail;
484 1.1 martin }
485 1.1 martin
486 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
487 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
488 1.1 martin if (error != 0) {
489 1.1 martin aprint_error_dev(sc->sc_dev,
490 1.1 martin "could not load desc DMA map\n");
491 1.1 martin goto fail;
492 1.1 martin }
493 1.1 martin
494 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
495 1.1 martin sc->sc_rxq.r_desc = rings;
496 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
497 1.1 martin
498 1.1 martin /* and next rings to the TX side */
499 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
500 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
501 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
502 1.1 martin
503 1.1 martin return 0;
504 1.1 martin
505 1.1 martin fail:
506 1.1 martin dwc_gmac_free_dma_rings(sc);
507 1.1 martin return error;
508 1.1 martin }
509 1.1 martin
/*
 * Release the combined RX/TX descriptor region, undoing the steps of
 * dwc_gmac_alloc_dma_rings() in reverse order: sync, unload the map,
 * unmap the kernel virtual mapping, then free the physical segment.
 */
static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}
520 1.1 martin
521 1.1 martin static void
522 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
523 1.1 martin {
524 1.1 martin struct dwc_gmac_rx_data *data;
525 1.1 martin int i;
526 1.1 martin
527 1.1 martin if (ring->r_desc == NULL)
528 1.1 martin return;
529 1.1 martin
530 1.1 martin
531 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
532 1.1 martin data = &ring->r_data[i];
533 1.1 martin
534 1.1 martin if (data->rd_map != NULL) {
535 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
536 1.1 martin AWGE_RX_RING_COUNT
537 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
538 1.1 martin BUS_DMASYNC_POSTREAD);
539 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
540 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
541 1.1 martin }
542 1.1 martin if (data->rd_m != NULL)
543 1.1 martin m_freem(data->rd_m);
544 1.1 martin }
545 1.1 martin }
546 1.1 martin
/*
 * Create the DMA maps for the TX ring and pre-chain the descriptors
 * into a circular list.  Returns 0 or an errno; on failure everything
 * created so far is released through dwc_gmac_free_tx_ring().
 */
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	/*
	 * NOTE(review): POSTWRITE after clearing the descriptors looks
	 * suspicious; PREWRITE (flush CPU writes toward the device)
	 * would be expected here -- confirm against bus_dma(9).
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		/* maps allow up to AWGE_TX_RING_COUNT segments per packet */
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		/* link each descriptor to its successor, wrapping at the end */
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}
584 1.1 martin
585 1.1 martin static void
586 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
587 1.1 martin {
588 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
589 1.1 martin if (end > start) {
590 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
591 1.1 martin TX_DESC_OFFSET(start),
592 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
593 1.1 martin ops);
594 1.1 martin return;
595 1.1 martin }
596 1.1 martin /* sync from 'start' to end of ring */
597 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
598 1.1 martin TX_DESC_OFFSET(start),
599 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
600 1.1 martin ops);
601 1.1 martin /* sync from start of ring to 'end' */
602 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
603 1.1 martin TX_DESC_OFFSET(0),
604 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
605 1.1 martin ops);
606 1.1 martin }
607 1.1 martin
608 1.1 martin static void
609 1.1 martin dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
610 1.1 martin struct dwc_gmac_tx_ring *ring)
611 1.1 martin {
612 1.1 martin int i;
613 1.1 martin
614 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
615 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
616 1.1 martin
617 1.1 martin if (data->td_m != NULL) {
618 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
619 1.1 martin 0, data->td_active->dm_mapsize,
620 1.1 martin BUS_DMASYNC_POSTWRITE);
621 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
622 1.1 martin m_freem(data->td_m);
623 1.1 martin data->td_m = NULL;
624 1.1 martin }
625 1.1 martin }
626 1.1 martin
627 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
628 1.1 martin TX_DESC_OFFSET(0),
629 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
630 1.1 martin BUS_DMASYNC_PREWRITE);
631 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
632 1.6 martin sc->sc_txq.t_physaddr);
633 1.1 martin
634 1.1 martin ring->t_queued = 0;
635 1.1 martin ring->t_cur = ring->t_next = 0;
636 1.1 martin }
637 1.1 martin
638 1.1 martin static void
639 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
640 1.1 martin struct dwc_gmac_tx_ring *ring)
641 1.1 martin {
642 1.1 martin int i;
643 1.1 martin
644 1.1 martin /* unload the maps */
645 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
646 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
647 1.1 martin
648 1.1 martin if (data->td_m != NULL) {
649 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
650 1.1 martin 0, data->td_map->dm_mapsize,
651 1.1 martin BUS_DMASYNC_POSTWRITE);
652 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
653 1.1 martin m_freem(data->td_m);
654 1.1 martin data->td_m = NULL;
655 1.1 martin }
656 1.1 martin }
657 1.1 martin
658 1.1 martin /* and actually free them */
659 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
660 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
661 1.1 martin
662 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
663 1.1 martin }
664 1.1 martin }
665 1.1 martin
666 1.1 martin static void
667 1.1 martin dwc_gmac_miibus_statchg(struct ifnet *ifp)
668 1.1 martin {
669 1.1 martin struct dwc_gmac_softc * const sc = ifp->if_softc;
670 1.1 martin struct mii_data * const mii = &sc->sc_mii;
671 1.9 martin uint32_t conf;
672 1.1 martin
673 1.1 martin /*
674 1.1 martin * Set MII or GMII interface based on the speed
675 1.1 martin * negotiated by the PHY.
676 1.9 martin */
677 1.9 martin conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
678 1.9 martin conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
679 1.9 martin |AWIN_GMAC_MAC_CONF_FULLDPLX);
680 1.11 martin conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
681 1.11 martin | AWIN_GMAC_MAC_CONF_DISABLERXOWN
682 1.11 martin | AWIN_GMAC_MAC_CONF_RXENABLE
683 1.11 martin | AWIN_GMAC_MAC_CONF_TXENABLE;
684 1.1 martin switch (IFM_SUBTYPE(mii->mii_media_active)) {
685 1.1 martin case IFM_10_T:
686 1.12 jmcneill conf |= AWIN_GMAC_MAC_CONF_MIISEL;
687 1.9 martin break;
688 1.1 martin case IFM_100_TX:
689 1.12 jmcneill conf |= AWIN_GMAC_MAC_CONF_FES100 |
690 1.12 jmcneill AWIN_GMAC_MAC_CONF_MIISEL;
691 1.1 martin break;
692 1.1 martin case IFM_1000_T:
693 1.1 martin break;
694 1.1 martin }
695 1.9 martin if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
696 1.9 martin conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
697 1.9 martin
698 1.9 martin #ifdef DWC_GMAC_DEBUG
699 1.9 martin aprint_normal_dev(sc->sc_dev,
700 1.9 martin "setting MAC conf register: %08x\n", conf);
701 1.9 martin #endif
702 1.9 martin
703 1.9 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
704 1.9 martin AWIN_GMAC_MAC_CONF, conf);
705 1.1 martin }
706 1.1 martin
/*
 * ifnet if_init callback: bring the interface up.  Programs the DMA
 * bus mode, the address/multicast filters and the ring base pointers,
 * then starts the RX/TX DMA engines.  Returns 0, also when the
 * interface is already running.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	/* nothing to do if already up */
	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter: promiscuous and broadcast acceptance
	 * track the corresponding interface flags.
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
768 1.1 martin
769 1.1 martin static void
770 1.1 martin dwc_gmac_start(struct ifnet *ifp)
771 1.1 martin {
772 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
773 1.1 martin int old = sc->sc_txq.t_queued;
774 1.1 martin struct mbuf *m0;
775 1.1 martin
776 1.1 martin if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
777 1.1 martin return;
778 1.1 martin
779 1.1 martin for (;;) {
780 1.1 martin IFQ_POLL(&ifp->if_snd, m0);
781 1.1 martin if (m0 == NULL)
782 1.1 martin break;
783 1.1 martin if (dwc_gmac_queue(sc, m0) != 0) {
784 1.1 martin ifp->if_flags |= IFF_OACTIVE;
785 1.1 martin break;
786 1.1 martin }
787 1.1 martin IFQ_DEQUEUE(&ifp->if_snd, m0);
788 1.1 martin bpf_mtap(ifp, m0);
789 1.1 martin }
790 1.1 martin
791 1.1 martin if (sc->sc_txq.t_queued != old) {
792 1.1 martin /* packets have been queued, kick it off */
793 1.1 martin dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
794 1.1 martin BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
795 1.10 martin
796 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
797 1.10 martin AWIN_GMAC_DMA_TXPOLL, ~0U);
798 1.10 martin #ifdef DWC_GMAC_DEBUG
799 1.10 martin dwc_dump_status(sc);
800 1.10 martin #endif
801 1.1 martin }
802 1.1 martin }
803 1.1 martin
804 1.1 martin static void
805 1.1 martin dwc_gmac_stop(struct ifnet *ifp, int disable)
806 1.1 martin {
807 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
808 1.1 martin
809 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
810 1.6 martin AWIN_GMAC_DMA_OPMODE,
811 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
812 1.6 martin AWIN_GMAC_DMA_OPMODE)
813 1.6 martin & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
814 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
815 1.6 martin AWIN_GMAC_DMA_OPMODE,
816 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
817 1.6 martin AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
818 1.6 martin
819 1.1 martin mii_down(&sc->sc_mii);
820 1.1 martin dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
821 1.1 martin dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
822 1.1 martin }
823 1.1 martin
/*
 * Add m0 to the TX ring
 *
 * Maps the mbuf chain for DMA and fills one chained TX descriptor per
 * segment.  Ownership of the first descriptor is handed to the device
 * only after all later ones are set up, so the DMA engine can never see
 * a half-initialized chain.  Returns 0 on success, ENOBUFS when the
 * ring lacks room, or the bus_dmamap_load_mbuf() error.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	/* Keep at least one descriptor free in the ring. */
	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		/* Last segment: end of frame, request a TX interrupt. */
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		/* TXFIRST only applies to the very first segment. */
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	/* Record mbuf and map at the LAST descriptor of the chain. */
	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
903 1.1 martin
904 1.22 martin /*
905 1.22 martin * If the interface is up and running, only modify the receive
906 1.22 martin * filter when setting promiscuous or debug mode. Otherwise fall
907 1.22 martin * through to ether_ioctl, which will reset the chip.
908 1.22 martin */
909 1.22 martin static int
910 1.22 martin dwc_gmac_ifflags_cb(struct ethercom *ec)
911 1.22 martin {
912 1.22 martin struct ifnet *ifp = &ec->ec_if;
913 1.22 martin struct dwc_gmac_softc *sc = ifp->if_softc;
914 1.22 martin int change = ifp->if_flags ^ sc->sc_if_flags;
915 1.22 martin
916 1.22 martin if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
917 1.22 martin return ENETRESET;
918 1.22 martin if ((change & IFF_PROMISC) != 0)
919 1.22 martin dwc_gmac_setmulti(sc);
920 1.22 martin return 0;
921 1.22 martin }
922 1.22 martin
923 1.1 martin static int
924 1.1 martin dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
925 1.1 martin {
926 1.20 jmcneill struct dwc_gmac_softc *sc = ifp->if_softc;
927 1.1 martin int s, error = 0;
928 1.1 martin
929 1.1 martin s = splnet();
930 1.1 martin
931 1.22 martin if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
932 1.1 martin error = 0;
933 1.1 martin if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
934 1.1 martin ;
935 1.22 martin else if (ifp->if_flags & IFF_RUNNING) {
936 1.22 martin /*
937 1.22 martin * Multicast list has changed; set the hardware filter
938 1.22 martin * accordingly.
939 1.22 martin */
940 1.20 jmcneill dwc_gmac_setmulti(sc);
941 1.22 martin }
942 1.1 martin }
943 1.1 martin
944 1.22 martin /* Try to get things going again */
945 1.22 martin if (ifp->if_flags & IFF_UP)
946 1.22 martin dwc_gmac_start(ifp);
947 1.22 martin sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
948 1.1 martin splx(s);
949 1.1 martin return error;
950 1.1 martin }
951 1.1 martin
/*
 * Reclaim completed TX descriptors: walk the ring from t_next, stop at
 * the first descriptor still owned by the device, and free the mbuf
 * and DMA map that were recorded at the end of each transmitted chain.
 */
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		/* Device not done with this descriptor yet - stop here. */
		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		/* Only the last descriptor of a chain carries the mbuf. */
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	/* Ring has room again - let dwc_gmac_start() queue more. */
	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}
1001 1.8 martin
/*
 * Harvest received packets: walk the RX ring from r_cur until we hit a
 * descriptor still owned by the device.  For each completed frame try
 * to swap in a fresh mbuf cluster; if allocation or the DMA load fails
 * the frame is dropped and the old buffer recycled.  Every visited
 * descriptor is handed back to the device before moving on.
 */
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		/* Still owned by the device: no more completed frames. */
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* Slot would be left without any buffer. */
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;	/* frame still contains the FCS */

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		/* Hand the descriptor back to the device. */
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

}
1113 1.8 martin
/*
 * Reverse the bit order of a 32 bit word, see
 * http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	/*
	 * Swap progressively smaller groups; the stages are independent,
	 * so they may be applied in any order.
	 */
	x = (x >> 16) | (x << 16);				/* halves */
	x = ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8);	/* bytes */
	x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);	/* nibbles */
	x = ((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2);	/* pairs */

	return ((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1);
}
1127 1.22 martin
/*
 * Program the MAC frame filter and the 64 bit multicast hash table
 * from the ethercom multicast list.  Multicast address ranges and
 * IFF_PROMISC fall back to a catch-all filter setup.  Runs at splnet
 * and refreshes the sc_if_flags snapshot used by the ifflags callback.
 */
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	/* Start from an empty hash table. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		/* Ranges cannot be hashed - accept all multicast. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		/* Hash index: top 6 bits of the bit-reversed CRC32. */
		h = bitrev32(
			~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Only enable hash matching if there are entries to match. */
	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}
1205 1.20 jmcneill
1206 1.1 martin int
1207 1.1 martin dwc_gmac_intr(struct dwc_gmac_softc *sc)
1208 1.1 martin {
1209 1.1 martin uint32_t status, dma_status;
1210 1.8 martin int rv = 0;
1211 1.1 martin
1212 1.1 martin status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1213 1.2 martin if (status & AWIN_GMAC_MII_IRQ) {
1214 1.1 martin (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1215 1.1 martin AWIN_GMAC_MII_STATUS);
1216 1.8 martin rv = 1;
1217 1.2 martin mii_pollstat(&sc->sc_mii);
1218 1.2 martin }
1219 1.1 martin
1220 1.1 martin dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1221 1.1 martin AWIN_GMAC_DMA_STATUS);
1222 1.1 martin
1223 1.8 martin if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
1224 1.8 martin rv = 1;
1225 1.1 martin
1226 1.8 martin if (dma_status & GMAC_DMA_INT_TIE)
1227 1.8 martin dwc_gmac_tx_intr(sc);
1228 1.1 martin
1229 1.8 martin if (dma_status & GMAC_DMA_INT_RIE)
1230 1.8 martin dwc_gmac_rx_intr(sc);
1231 1.8 martin
1232 1.8 martin /*
1233 1.8 martin * Check error conditions
1234 1.8 martin */
1235 1.8 martin if (dma_status & GMAC_DMA_INT_ERRORS) {
1236 1.8 martin sc->sc_ec.ec_if.if_oerrors++;
1237 1.8 martin #ifdef DWC_GMAC_DEBUG
1238 1.8 martin dwc_dump_and_abort(sc, "interrupt error condition");
1239 1.8 martin #endif
1240 1.8 martin }
1241 1.8 martin
1242 1.8 martin /* ack interrupt */
1243 1.8 martin if (dma_status)
1244 1.8 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1245 1.8 martin AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1246 1.8 martin
1247 1.8 martin return rv;
1248 1.1 martin }
1249 1.7 martin
1250 1.7 martin #ifdef DWC_GMAC_DEBUG
1251 1.7 martin static void
1252 1.7 martin dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1253 1.7 martin {
1254 1.7 martin aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1255 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1256 1.7 martin aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1257 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1258 1.7 martin aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1259 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1260 1.7 martin aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1261 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1262 1.7 martin aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1263 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1264 1.7 martin aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1265 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1266 1.7 martin aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1267 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1268 1.7 martin aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1269 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1270 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1271 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1272 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1273 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1274 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1275 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1276 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1277 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1278 1.7 martin }
1279 1.7 martin
1280 1.7 martin static void
1281 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1282 1.7 martin {
1283 1.7 martin int i;
1284 1.7 martin
1285 1.8 martin aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1286 1.8 martin sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1287 1.8 martin aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1288 1.7 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1289 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1290 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1291 1.15 martin "data: %08x next: %08x\n",
1292 1.15 martin i, sc->sc_txq.t_physaddr +
1293 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1294 1.7 martin le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1295 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1296 1.7 martin }
1297 1.7 martin }
1298 1.8 martin
1299 1.8 martin static void
1300 1.11 martin dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1301 1.11 martin {
1302 1.11 martin int i;
1303 1.11 martin
1304 1.11 martin aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1305 1.11 martin sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1306 1.11 martin aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1307 1.11 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1308 1.11 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1309 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1310 1.15 martin "data: %08x next: %08x\n",
1311 1.15 martin i, sc->sc_rxq.r_physaddr +
1312 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1313 1.11 martin le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
1314 1.11 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1315 1.11 martin }
1316 1.11 martin }
1317 1.11 martin
/*
 * Debug helper: print the raw MAC interrupt status together with a
 * decoded rendering of the DMA status register.
 */
static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}
1348 1.8 martin
/*
 * Debug helper: dump interrupt status, filter setup and both DMA
 * descriptor rings, then panic with the given message.
 */
static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
1361 1.22 martin
1362 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1363 1.22 martin {
1364 1.22 martin char buf[200];
1365 1.22 martin
1366 1.22 martin /* print filter setup */
1367 1.22 martin snprintb(buf, sizeof(buf), "\177\20"
1368 1.22 martin "b\x1f""RA\0"
1369 1.22 martin "b\x0a""HPF\0"
1370 1.22 martin "b\x09""SAF\0"
1371 1.22 martin "b\x08""SAIF\0"
1372 1.22 martin "b\x05""DBF\0"
1373 1.22 martin "b\x04""PM\0"
1374 1.22 martin "b\x03""DAIF\0"
1375 1.22 martin "b\x02""HMC\0"
1376 1.22 martin "b\x01""HUC\0"
1377 1.22 martin "b\x00""PR\0"
1378 1.22 martin "\0", ffilt);
1379 1.22 martin aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1380 1.22 martin }
1381 1.7 martin #endif
1382