/* $NetBSD: dwc_gmac.c,v 1.53 2018/09/17 20:25:49 aymeric Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documentation can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.53 2018/09/17 20:25:49 aymeric Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static int dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static int dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

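/*
 * The core can use two DMA descriptor layouts; dwc_gmac_attach()
 * selects the "enhanced" method table below when the hardware
 * features register advertises GMAC_DMA_FEAT_ENHANCED_DESC, and the
 * "standard" one otherwise.
 */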
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};


#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
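
/*
 * RX and TX descriptors share one DMA area: the first
 * AWGE_RX_RING_COUNT descriptors belong to the RX ring and the TX
 * ring follows, which is why TX_DESC_OFFSET() biases by the RX count.
 * The "& (COUNT-1)" wrap in TX_NEXT()/RX_NEXT() assumes both ring
 * sizes are powers of two; e.g. with a 32 entry TX ring, TX_NEXT(31)
 * yields 0.
 */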


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	if (bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_HWFEATURES) &
	    GMAC_DMA_FEAT_ENHANCED_DESC)
		sc->sc_descm = &desc_methods_enhanced;
	else
		sc->sc_descm = &desc_methods_standard;

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
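
	/*
	 * Set the software reset bit and wait for the device to clear
	 * it again; the poll budget below is 3000 * 10us = 30ms.
	 */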
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

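	/*
	 * ADDR0 is split over two registers: the low word holds the
	 * first four address bytes, the high word the last two, so
	 * e.g. 00:11:22:33:44:55 is written as lo = 0x33221100 and
	 * hi = 0x00005544.
	 */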
	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
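	/*
	 * Example of the wrap-around case handled below: if the ring
	 * held 32 descriptors, start == 30 and end == 2 would sync
	 * entries 30-31 in the first call and 0-1 in the second.
	 */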
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

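	/*
	 * Peek at the next packet first and only dequeue it once it
	 * has been queued to the ring; on failure it stays on the
	 * send queue for a later attempt.
	 */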
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode. Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 need not be a valid descriptor index; it is
		 * simply how we ask dwc_gmac_txdesc_sync() to sync
		 * the single descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

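		/*
		 * td_m/td_active are only set on the last descriptor
		 * of a packet (see dwc_gmac_queue()), so intermediate
		 * segments are skipped here and accounted for when
		 * the final segment completes.
		 */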
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse the order of bits - see http://aggregate.org/MAGIC/#Bit%20Reversal
 */
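/*
 * Each round swaps progressively larger groups (single bits, pairs,
 * nibbles, bytes, half-words), e.g. bitrev32(0x00000001) == 0x80000000.
 */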
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

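		/*
		 * The hardware hash filter uses the upper 6 bits of
		 * the bit-reversed CRC32 of the address: bit 5 of h
		 * selects HTHIGH vs. HTLOW, bits 0-4 the bit within
		 * that 32-bit register.
		 */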
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{
	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{
	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static int
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{
	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{
	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{
	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{
	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static int
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{
	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{
	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{
	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{
	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif
