/* $NetBSD: dwc_gmac.c,v 1.73 2021/05/13 05:56:39 msaitoh Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *   http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.73 2021/05/13 05:56:39 msaitoh Exp $");

/* #define DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *,
    struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};


#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define	RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
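
/*
 * TX_NEXT()/RX_NEXT() wrap by masking with (ring count - 1); this
 * relies on AWGE_TX_RING_COUNT and AWGE_RX_RING_COUNT being powers
 * of two.
 */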

#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
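
/*
 * DMA status bits treated as error conditions in dwc_gmac_intr():
 * the abnormal-summary bit plus the individual bus error, RX
 * watchdog/unavailable/overflow and TX underflow/jabber conditions.
 */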

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address: locally administered, unicast */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}
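
/*
 * Usage sketch (illustrative, not part of this driver): a bus specific
 * front-end is expected to map the device registers, fill in sc_dev,
 * sc_bst, sc_bsh and sc_dmat, establish the interrupt with a handler
 * that calls dwc_gmac_intr(), and only then call dwc_gmac_attach().
 * The names "faa", "mydev_intr" and MII_CLK_DIV below are hypothetical:
 *
 *	sc->sc_dev = self;
 *	sc->sc_bst = faa->faa_bst;
 *	sc->sc_dmat = faa->faa_dmat;
 *	bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh);
 *	intr_establish(irq, IPL_NET, IST_LEVEL, mydev_intr, sc);
 *	dwc_gmac_attach(sc, MII_PHY_ANY, MII_CLK_DIV);
 *
 * mii_clk is masked with 7 above, so only the low three bits of the
 * divider value are used.
 */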

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
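
	/* Wait up to ~300ms (30000 * 10us) for the core to clear the bit. */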
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

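/*
 * MDIO access protocol: compose the PHY address, register number and
 * the configured MDC clock divider into the MIIADDR register together
 * with the BUSY bit (plus the WRITE bit and MIIDATA payload for
 * writes), then poll until the hardware clears BUSY.  Both accessors
 * give up after 1000 * 10us and return ETIMEDOUT.
 */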
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

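/*
 * The RX ring is populated with one mbuf cluster per descriptor.  Each
 * descriptor is linked to the next via ddesc_next (chained mode) and
 * handed to the device; the DMA engine's RX base pointer is then set
 * to the start of the ring.
 */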
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/* sync the mapped buffer, not the descriptor ring */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

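/*
 * Sync the TX descriptors from 'start' up to (but not including) 'end'
 * within sc_dma_ring_map, splitting the sync in two when the range
 * wraps around the end of the ring.
 */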
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add mbuf chain m0 to the TX ring: map it, fill in one descriptor per
 * DMA segment, and hand ownership of the first descriptor to the
 * device only after all the others have been set up.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Pass first to device; the descriptor ring itself is synced
	 * by the caller via dwc_gmac_txdesc_sync().
	 */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor index;
		 * dwc_gmac_txdesc_sync() interprets [i, i+1) simply as
		 * a request to sync the single descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

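		/*
		 * The hash is the upper 6 bits of the bit-reversed
		 * CRC32 of the address: bit 5 selects HTHIGH vs.
		 * HTLOW, bits 0-4 the bit within that register.
		 */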
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

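	/*
	 * MAC-level interrupts (MII/link changes) are cleared by reading
	 * AWIN_GMAC_MII_STATUS; DMA interrupts are acknowledged further
	 * down by writing the handled bits back to the DMA status
	 * register.
	 */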
	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

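/*
 * Descriptor accessors.  The core can use either the "standard" or the
 * "enhanced" descriptor layout (selected at attach time from the HW
 * feature register); sc_descm dispatches to the matching set below.
 * The OWN bit lives in ddesc_status0 in both layouts, so the ownership
 * helpers are shared.
 */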
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
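	/*
	 * The chain bit occupies the same position in RDES1 as in
	 * TDES1 for the standard layout, so the TX macro is reused.
	 */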
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i * sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif