/* $NetBSD: dwc_gmac.c,v 1.83 2024/02/27 08:28:56 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the closest are
 * the marketing product documents:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.83 2024/02/27 08:28:56 skrll Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
        .tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
        .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
        .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
        .tx_set_len = dwc_gmac_desc_std_set_len,
        .tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
        .tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
        .rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
        .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
        .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
        .rx_set_len = dwc_gmac_desc_std_set_len,
        .rx_get_len = dwc_gmac_desc_std_get_len,
        .rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
        .tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
        .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
        .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
        .tx_set_len = dwc_gmac_desc_enh_set_len,
        .tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
        .tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
        .rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
        .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
        .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
        .rx_set_len = dwc_gmac_desc_enh_set_len,
        .rx_get_len = dwc_gmac_desc_enh_get_len,
        .rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

#define TX_DESC_OFFSET(N)       ((AWGE_RX_RING_COUNT + (N)) \
                                    * sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)              (((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define RX_DESC_OFFSET(N)       ((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)              (((N) + 1) & (AWGE_RX_RING_COUNT - 1))
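
/*
 * Illustration (sketch, not compiled): both ring sizes must be powers
 * of two for the "& (COUNT - 1)" masks above to behave like a modulo
 * (e.g. with a 32-entry TX ring, TX_NEXT(31) == 0), and the TX
 * descriptors sit directly behind the AWGE_RX_RING_COUNT RX
 * descriptors in the shared DMA allocation; the actual ring sizes
 * come from dwc_gmac_var.h.
 */
#if 0   /* example only, not part of the driver */
static void
dwc_gmac_ring_macro_demo(void)
{

        KASSERT(TX_NEXT(AWGE_TX_RING_COUNT - 1) == 0);  /* wraps to 0 */
        KASSERT(TX_DESC_OFFSET(0) == RX_DESC_OFFSET(AWGE_RX_RING_COUNT));
}
#endif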

#define GMAC_DEF_DMA_INT_MASK   (GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
                                GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
                                GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS     (GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
                                GMAC_DMA_INT_FBE | \
                                GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
                                GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
                                GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
        (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
        AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
        uint8_t enaddr[ETHER_ADDR_LEN];
        uint32_t maclo, machi, ver, hwft;
        struct mii_data * const mii = &sc->sc_mii;
        struct ifnet * const ifp = &sc->sc_ec.ec_if;
        prop_dictionary_t dict;

        mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
        sc->sc_mii_clk = mii_clk & 7;

        dict = device_properties(sc->sc_dev);
        prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
        if (ea != NULL) {
                /*
                 * If the MAC address is overridden by a device property,
                 * use that.
                 */
                KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
                KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
                memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
        } else {
                /*
                 * If we did not get an externally configured address,
                 * try to read one from the current filter setup,
                 * before resetting the chip.
                 */
                maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_ADDR0LO);
                machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_ADDR0HI);

                if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
                        /* fake MAC address */
                        maclo = 0x00f2 | (cprng_strong32() << 16);
                        machi = cprng_strong32();
                }

                enaddr[0] = maclo & 0x0ff;
                enaddr[1] = (maclo >> 8) & 0x0ff;
                enaddr[2] = (maclo >> 16) & 0x0ff;
                enaddr[3] = (maclo >> 24) & 0x0ff;
                enaddr[4] = machi & 0x0ff;
                enaddr[5] = (machi >> 8) & 0x0ff;
        }

        ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
        aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

        /*
         * Init chip and do initial setup
         */
        if (dwc_gmac_reset(sc) != 0)
                return ENXIO;   /* not much to cleanup, haven't attached yet */
        dwc_gmac_write_hwaddr(sc, enaddr);
        aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
            ether_sprintf(enaddr));

        hwft = 0;
        if (ver >= 0x35) {
                hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_HWFEATURES);
                aprint_normal_dev(sc->sc_dev,
                    "HW feature mask: %x\n", hwft);
        }

        if (sizeof(bus_addr_t) > 4) {
                int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
                    &sc->sc_dmat, BUS_DMA_WAITOK);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "failed to create DMA subregion\n");
                        return ENOMEM;
                }
        }

        if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
                aprint_normal_dev(sc->sc_dev,
                    "Using enhanced descriptor format\n");
                sc->sc_descm = &desc_methods_enhanced;
        } else {
                sc->sc_descm = &desc_methods_standard;
        }
        if (hwft & GMAC_DMA_FEAT_RMON) {
                uint32_t val;

                /* Mask all MMC interrupts */
                val = 0xffffffff;
                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    GMAC_MMC_RX_INT_MSK, val);
                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    GMAC_MMC_TX_INT_MSK, val);
        }

        /*
         * Allocate Tx and Rx rings
         */
        if (dwc_gmac_alloc_dma_rings(sc) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
                goto fail;
        }

        if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
                goto fail;
        }

        if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
                goto fail;
        }

        sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
        mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
        mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

        /*
         * Prepare interface data
         */
        ifp->if_softc = sc;
        strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
        ifp->if_extflags = IFEF_MPSAFE;
#endif
        ifp->if_ioctl = dwc_gmac_ioctl;
        ifp->if_start = dwc_gmac_start;
        ifp->if_init = dwc_gmac_init;
        ifp->if_stop = dwc_gmac_stop;
        IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Attach MII subdevices
         */
        sc->sc_ec.ec_mii = &sc->sc_mii;
        ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
        mii->mii_ifp = ifp;
        mii->mii_readreg = dwc_gmac_miibus_read_reg;
        mii->mii_writereg = dwc_gmac_miibus_write_reg;
        mii->mii_statchg = dwc_gmac_miibus_statchg;
        mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
            MIIF_DOPAUSE);

        if (LIST_EMPTY(&mii->mii_phys)) {
                aprint_error_dev(sc->sc_dev, "no PHY found!\n");
                ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
                ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
        } else {
                ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
        }

        /*
         * We can support 802.1Q VLAN-sized frames.
         */
        sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

        /*
         * Ready, attach interface
         */
        if_initialize(ifp);
        sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
        if_deferred_start_init(ifp, NULL);
        ether_ifattach(ifp, enaddr);
        ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
        if_register(ifp);
        rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
            RND_TYPE_NET, RND_FLAG_DEFAULT);

        /*
         * Enable interrupts
         */
        mutex_enter(sc->sc_lock);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
            AWIN_DEF_MAC_INTRMASK);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
            GMAC_DEF_DMA_INT_MASK);
        mutex_exit(sc->sc_lock);

        return 0;

fail:
        dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
        dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
        dwc_gmac_free_dma_rings(sc);
        mutex_destroy(&sc->sc_mdio_lock);

        return ENXIO;
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
        size_t cnt;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
            | GMAC_BUSMODE_RESET);
        for (cnt = 0; cnt < 30000; cnt++) {
                if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
                        return 0;
                delay(10);
        }

        aprint_error_dev(sc->sc_dev, "reset timed out\n");
        return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
        uint32_t hi, lo;

        hi = enaddr[4] | (enaddr[5] << 8);
        lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
            | ((uint32_t)enaddr[3] << 24);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}
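
/*
 * Worked example (sketch, not compiled): the address is packed
 * little-endian, byte 0 in the low bits of ADDR0LO. Writing
 * 00:11:22:33:44:55 stores lo = 0x33221100 and hi = 0x00005544.
 */
#if 0   /* example only, not part of the driver */
static void
dwc_gmac_hwaddr_demo(struct dwc_gmac_softc *sc)
{
        uint8_t demo[ETHER_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        dwc_gmac_write_hwaddr(sc, demo);        /* lo=0x33221100 hi=0x5544 */
}
#endif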

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
        struct dwc_gmac_softc * const sc = device_private(self);
        uint16_t mii;
        size_t cnt;

        mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
            | __SHIFTIN(reg, GMAC_MII_REG_MASK)
            | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
            | GMAC_MII_BUSY;

        mutex_enter(&sc->sc_mdio_lock);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
                        *val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                            AWIN_GMAC_MAC_MIIDATA);
                        break;
                }
                delay(10);
        }

        mutex_exit(&sc->sc_mdio_lock);

        if (cnt >= 1000)
                return ETIMEDOUT;

        return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
        struct dwc_gmac_softc * const sc = device_private(self);
        uint16_t mii;
        size_t cnt;

        mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
            | __SHIFTIN(reg, GMAC_MII_REG_MASK)
            | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
            | GMAC_MII_BUSY | GMAC_MII_WRITE;

        mutex_enter(&sc->sc_mdio_lock);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
                        break;
                delay(10);
        }

        mutex_exit(&sc->sc_mdio_lock);

        if (cnt >= 1000)
                return ETIMEDOUT;

        return 0;
}
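
/*
 * Both MDIO accessors above share one pattern: pack the PHY address,
 * register number and clock divider into AWIN_GMAC_MAC_MIIADDR with
 * GMAC_MII_BUSY set (plus GMAC_MII_WRITE for writes), then poll until
 * the hardware clears the busy bit, giving up after 1000 * 10us. A
 * minimal sketch of the shared poll loop, assuming the same register
 * layout:
 */
#if 0   /* example only, not part of the driver */
static int
dwc_gmac_mii_wait_demo(struct dwc_gmac_softc *sc)
{
        size_t cnt;

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
                        return 0;
                delay(10);
        }
        return ETIMEDOUT;
}
#endif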

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_rx_data *data;
        bus_addr_t physaddr;
        const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
        int error, i, next;

        ring->r_cur = ring->r_next = 0;
        memset(ring->r_desc, 0, descsize);

        /*
         * Pre-allocate Rx buffers and populate Rx ring.
         */
        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc;

                data = &sc->sc_rxq.r_data[i];

                MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
                if (data->rd_m == NULL) {
                        aprint_error_dev(sc->sc_dev,
                            "could not allocate rx mbuf #%d\n", i);
                        error = ENOMEM;
                        goto fail;
                }
                error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not create DMA map\n");
                        data->rd_map = NULL;
                        goto fail;
                }
                MCLGET(data->rd_m, M_DONTWAIT);
                if (!(data->rd_m->m_flags & M_EXT)) {
                        aprint_error_dev(sc->sc_dev,
                            "could not allocate mbuf cluster #%d\n", i);
                        error = ENOMEM;
                        goto fail;
                }
                data->rd_m->m_len = data->rd_m->m_pkthdr.len
                    = data->rd_m->m_ext.ext_size;
                if (data->rd_m->m_len > AWGE_MAX_PACKET) {
                        data->rd_m->m_len = data->rd_m->m_pkthdr.len
                            = AWGE_MAX_PACKET;
                }

                error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
                    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not load rx buf DMA map #%d\n", i);
                        goto fail;
                }
                bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
                physaddr = data->rd_map->dm_segs[0].ds_addr;

                desc = &sc->sc_rxq.r_desc[i];
                desc->ddesc_data = htole32(physaddr);
                next = RX_NEXT(i);
                desc->ddesc_next = htole32(ring->r_physaddr
                    + next * sizeof(*desc));
                sc->sc_descm->rx_init_flags(desc);
                sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
                sc->sc_descm->rx_set_owned_by_dev(desc);
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            ring->r_physaddr);

        return 0;

fail:
        dwc_gmac_free_rx_ring(sc, ring);
        return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_dev_dmadesc *desc;
        struct dwc_gmac_rx_data *data;
        int i;

        mutex_enter(&ring->r_mtx);
        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                desc = &sc->sc_rxq.r_desc[i];
                data = &sc->sc_rxq.r_data[i];
                sc->sc_descm->rx_init_flags(desc);
                sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
                sc->sc_descm->rx_set_owned_by_dev(desc);
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        ring->r_cur = ring->r_next = 0;
        /* reset DMA address to start of ring */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            sc->sc_rxq.r_physaddr);
        mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
        const size_t descsize = AWGE_TOTAL_RING_COUNT *
            sizeof(struct dwc_gmac_dev_dmadesc);
        int error, nsegs;
        void *rings;

        error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
            BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not create desc DMA map\n");
                sc->sc_dma_ring_map = NULL;
                goto fail;
        }

        error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
            &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not allocate DMA memory\n");
                goto fail;
        }

        error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
            descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not map DMA memory\n");
                goto fail;
        }

        error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
            descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not load desc DMA map\n");
                goto fail;
        }

        /* give first AWGE_RX_RING_COUNT to the RX side */
        sc->sc_rxq.r_desc = rings;
        sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

        /* and next rings to the TX side */
        sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
        sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
            AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

        return 0;

fail:
        dwc_gmac_free_dma_rings(sc);
        return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
        bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
            AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
        bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_rx_data *data;
        int i;

        if (ring->r_desc == NULL)
                return;

        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                data = &ring->r_data[i];

                if (data->rd_map != NULL) {
                        bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                            data->rd_map->dm_mapsize,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->sc_dmat, data->rd_map);
                        bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
                }
                if (data->rd_m != NULL)
                        m_freem(data->rd_m);
        }
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i, error = 0;

        ring->t_queued = 0;
        ring->t_cur = ring->t_next = 0;

        memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(0),
            AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_POSTWRITE);

        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    AWGE_TX_RING_COUNT, MCLBYTES, 0,
                    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
                    &ring->t_data[i].td_map);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not create TX DMA map #%d\n", i);
                        ring->t_data[i].td_map = NULL;
                        goto fail;
                }
                ring->t_desc[i].ddesc_next = htole32(
                    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
                    * TX_NEXT(i));
        }

        return 0;

fail:
        dwc_gmac_free_tx_ring(sc, ring);
        return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
        /* 'end' is pointing one descriptor beyond the last we want to sync */
        if (end > start) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    TX_DESC_OFFSET(start),
                    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
                    ops);
                return;
        }
        /* sync from 'start' to end of ring */
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(start),
            TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
            ops);
        if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
                /* sync from start of ring to 'end' */
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    TX_DESC_OFFSET(0),
                    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
                    ops);
        }
}
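
/*
 * Worked example: with a 32-entry TX ring (an assumed size), syncing
 * start = 30, end = 2 wraps around and is split into two
 * bus_dmamap_sync() calls, one covering descriptors 30..31 and one
 * covering descriptors 0..1; for end = 0 the second call is skipped,
 * since its length TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) would be 0.
 */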

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i;

        mutex_enter(&ring->t_mtx);
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                if (data->td_m != NULL) {
                        bus_dmamap_sync(sc->sc_dmat, data->td_active,
                            0, data->td_active->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, data->td_active);
                        m_freem(data->td_m);
                        data->td_m = NULL;
                }
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(0),
            AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
            sc->sc_txq.t_physaddr);

        ring->t_queued = 0;
        ring->t_cur = ring->t_next = 0;
        mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i;

        /* unload the maps */
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                if (data->td_m != NULL) {
                        bus_dmamap_sync(sc->sc_dmat, data->td_active,
                            0, data->td_active->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, data->td_active);
                        m_freem(data->td_m);
                        data->td_m = NULL;
                }
        }

        /* and actually free them */
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                bus_dmamap_destroy(sc->sc_dmat, data->td_map);
        }
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
        struct dwc_gmac_softc * const sc = ifp->if_softc;
        struct mii_data * const mii = &sc->sc_mii;
        uint32_t conf, flow;

        /*
         * Set MII or GMII interface based on the speed
         * negotiated by the PHY.
         */
        conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
        conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
            | AWIN_GMAC_MAC_CONF_FULLDPLX);
        conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
            | AWIN_GMAC_MAC_CONF_DISABLERXOWN
            | AWIN_GMAC_MAC_CONF_DISABLEJABBER
            | AWIN_GMAC_MAC_CONF_RXENABLE
            | AWIN_GMAC_MAC_CONF_TXENABLE;
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
                conf |= AWIN_GMAC_MAC_CONF_MIISEL;
                break;
        case IFM_100_TX:
                conf |= AWIN_GMAC_MAC_CONF_FES100 |
                    AWIN_GMAC_MAC_CONF_MIISEL;
                break;
        case IFM_1000_T:
                break;
        }
        if (sc->sc_set_speed)
                sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

        flow = 0;
        if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
                conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
                flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
        }
        if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
                flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
        }
        if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
                flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
        }
        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "setting MAC conf register: %08x\n", conf);
#endif

        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;

        mutex_enter(sc->sc_lock);
        int ret = dwc_gmac_init_locked(ifp);
        mutex_exit(sc->sc_lock);

        return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        uint32_t ffilt;

        if (ifp->if_flags & IFF_RUNNING)
                return 0;

        dwc_gmac_stop_locked(ifp, 0);

        /*
         * Configure DMA burst/transfer mode and RX/TX priorities.
         * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
         */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
            GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
            __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
            __SHIFTIN(2, GMAC_BUSMODE_PBL));

        /*
         * Set up address filter
         */
        ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
        if (ifp->if_flags & IFF_PROMISC) {
                ffilt |= AWIN_GMAC_MAC_FFILT_PR;
        } else {
                ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
        }
        if (ifp->if_flags & IFF_BROADCAST) {
                ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
        } else {
                ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
        }
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

        /*
         * Set up multicast filter
         */
        dwc_gmac_setmulti(sc);

        /*
         * Set up dma pointer for RX and TX ring
         */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            sc->sc_rxq.r_physaddr);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
            sc->sc_txq.t_physaddr);

        /*
         * Start RX/TX part
         */
        uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
        if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
                opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
                    GMAC_DMA_OP_TXSTOREFORWARD;
        }
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
            opmode);

        sc->sc_stopping = false;

        ifp->if_flags |= IFF_RUNNING;
        sc->sc_txbusy = false;

        return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
        KASSERT(if_is_mpsafe(ifp));
#endif

        mutex_enter(sc->sc_lock);
        if (!sc->sc_stopping) {
                mutex_enter(&sc->sc_txq.t_mtx);
                dwc_gmac_start_locked(ifp);
                mutex_exit(&sc->sc_txq.t_mtx);
        }
        mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int old = sc->sc_txq.t_queued;
        int start = sc->sc_txq.t_cur;
        struct mbuf *m0;

        if ((ifp->if_flags & IFF_RUNNING) == 0)
                return;
        if (sc->sc_txbusy)
                return;

        for (;;) {
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
                if (dwc_gmac_queue(sc, m0) != 0) {
                        sc->sc_txbusy = true;
                        break;
                }
                IFQ_DEQUEUE(&ifp->if_snd, m0);
                bpf_mtap(ifp, m0, BPF_D_OUT);
                if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
                        sc->sc_txbusy = true;
                        break;
                }
        }

        if (sc->sc_txq.t_queued != old) {
                /* packets have been queued, kick it off */
                dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
                dwc_dump_status(sc);
#endif
                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_TXPOLL, ~0U);
        }
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;

        mutex_enter(sc->sc_lock);
        dwc_gmac_stop_locked(ifp, disable);
        mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;

        sc->sc_stopping = true;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                AWIN_GMAC_DMA_OPMODE)
            & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

        mii_down(&sc->sc_mii);
        dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
        dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

        ifp->if_flags &= ~IFF_RUNNING;
        sc->sc_txbusy = false;
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
        struct dwc_gmac_dev_dmadesc *desc = NULL;
        struct dwc_gmac_tx_data *data = NULL;
        bus_dmamap_t map;
        int error, i, first;

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

        first = sc->sc_txq.t_cur;
        map = sc->sc_txq.t_data[first].td_map;

        error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
            BUS_DMA_WRITE | BUS_DMA_NOWAIT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev, "could not map mbuf "
                    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
                return error;
        }

        if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
                bus_dmamap_unload(sc->sc_dmat, map);
                return ENOBUFS;
        }

        for (i = 0; i < map->dm_nsegs; i++) {
                data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
                desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

                desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
                    "len %lu\n", sc->sc_txq.t_cur,
                    (unsigned long)map->dm_segs[i].ds_addr,
                    (unsigned long)map->dm_segs[i].ds_len);
#endif

                sc->sc_descm->tx_init_flags(desc);
                sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

                if (i == 0)
                        sc->sc_descm->tx_set_first_frag(desc);

                /*
                 * Defer passing ownership of the first descriptor
                 * until we are done.
                 */
                if (i != 0)
                        sc->sc_descm->tx_set_owned_by_dev(desc);

                sc->sc_txq.t_queued++;
                sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
        }

        sc->sc_descm->tx_set_last_frag(desc);

        data->td_m = m0;
        data->td_active = map;

        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREWRITE);

        /* Pass first to device */
        sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

        return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode. Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
        struct ifnet *ifp = &ec->ec_if;
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int ret = 0;

        mutex_enter(sc->sc_lock);
        u_short change = ifp->if_flags ^ sc->sc_if_flags;
        sc->sc_if_flags = ifp->if_flags;

        if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
                ret = ENETRESET;
                goto out;
        }
        if ((change & IFF_PROMISC) != 0) {
                dwc_gmac_setmulti(sc);
        }
out:
        mutex_exit(sc->sc_lock);

        return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int error = 0;

        int s = splnet();
        error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
        splx(s);
#endif

        if (error == ENETRESET) {
                error = 0;
                if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
                        ;
                else if (ifp->if_flags & IFF_RUNNING) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        mutex_enter(sc->sc_lock);
                        dwc_gmac_setmulti(sc);
                        mutex_exit(sc->sc_lock);
                }
        }

        /* Try to get things going again */
        if (ifp->if_flags & IFF_UP)
                dwc_gmac_start(ifp);
        sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
        splx(s);
#endif

        return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        struct dwc_gmac_tx_data *data;
        struct dwc_gmac_dev_dmadesc *desc;
        int i, nsegs;

        mutex_enter(&sc->sc_txq.t_mtx);

        for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
                    i, sc->sc_txq.t_queued);
#endif

                /*
                 * i + 1 does not need to be a valid descriptor,
                 * this is just a special notion to just sync
                 * a single tx descriptor (i)
                 */
                dwc_gmac_txdesc_sync(sc, i, i + 1,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

                desc = &sc->sc_txq.t_desc[i];
                if (sc->sc_descm->tx_is_owned_by_dev(desc))
                        break;

                data = &sc->sc_txq.t_data[i];
                if (data->td_m == NULL)
                        continue;

                if_statinc(ifp, if_opackets);
                nsegs = data->td_active->dm_nsegs;
                bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
                    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "dwc_gmac_tx_intr: done with packet at desc #%d, "
                    "freeing mbuf %p\n", i, data->td_m);
#endif

                m_freem(data->td_m);
                data->td_m = NULL;

                sc->sc_txq.t_queued -= nsegs;
        }

        sc->sc_txq.t_next = i;

        if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
                sc->sc_txbusy = false;
        }
        mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        struct dwc_gmac_dev_dmadesc *desc;
        struct dwc_gmac_rx_data *data;
        bus_addr_t physaddr;
        struct mbuf *m, *mnew;
        int i, len, error;

        mutex_enter(&sc->sc_rxq.r_mtx);
        for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    RX_DESC_OFFSET(i), sizeof(*desc),
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                desc = &sc->sc_rxq.r_desc[i];
                data = &sc->sc_rxq.r_data[i];

                if (sc->sc_descm->rx_is_owned_by_dev(desc))
                        break;

                if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
                        aprint_normal_dev(sc->sc_dev,
                            "RX error: descriptor status %08x, skipping\n",
                            le32toh(desc->ddesc_status0));
#endif
                        if_statinc(ifp, if_ierrors);
                        goto skip;
                }

                len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "rx int: device is done with descriptor #%d, len: %d\n",
                    i, len);
#endif

                /*
                 * Try to get a new mbuf before passing this one
                 * up, if that fails, drop the packet and reuse
                 * the existing one.
                 */
                MGETHDR(mnew, M_DONTWAIT, MT_DATA);
                if (mnew == NULL) {
                        if_statinc(ifp, if_ierrors);
                        goto skip;
                }
                MCLGET(mnew, M_DONTWAIT);
                if ((mnew->m_flags & M_EXT) == 0) {
                        m_freem(mnew);
                        if_statinc(ifp, if_ierrors);
                        goto skip;
                }
                mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
                if (mnew->m_len > AWGE_MAX_PACKET) {
                        mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
                }

                /* unload old DMA map */
                bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_dmat, data->rd_map);

                /* and reload with new mbuf */
                error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
                    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
                if (error != 0) {
                        m_freem(mnew);
                        /* try to reload old mbuf */
                        error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
                            data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
                        if (error != 0) {
                                panic("%s: could not load old rx mbuf",
                                    device_xname(sc->sc_dev));
                        }
                        if_statinc(ifp, if_ierrors);
                        goto skip;
                }
                physaddr = data->rd_map->dm_segs[0].ds_addr;

                /*
                 * New mbuf loaded, update RX ring and continue
                 */
                m = data->rd_m;
                data->rd_m = mnew;
                desc->ddesc_data = htole32(physaddr);

                /* finalize mbuf */
                m->m_pkthdr.len = m->m_len = len;
                m_set_rcvif(m, ifp);
                m->m_flags |= M_HASFCS;

                if_percpuq_enqueue(sc->sc_ipq, m);

skip:
                bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

                sc->sc_descm->rx_init_flags(desc);
                sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
                sc->sc_descm->rx_set_owned_by_dev(desc);

                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    RX_DESC_OFFSET(i), sizeof(*desc),
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }

        /* update RX pointer */
        sc->sc_rxq.r_cur = i;

        mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
        x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
        x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
        x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
        x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

        return (x >> 16) | (x << 16);
}
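
/*
 * Sketch (not compiled) of how dwc_gmac_setmulti() below derives a
 * hash bin: the upper 6 bits of the bit-reversed, inverted CRC32 of
 * the multicast address index a 64-bit table split across the
 * HTHIGH/HTLOW registers; h >> 5 selects the 32-bit word and
 * h & 0x1f the bit within it.
 */
#if 0   /* example only, not part of the driver */
static void
dwc_gmac_hash_demo(const uint8_t *ea, uint32_t hashes[2])
{
        uint32_t h;

        h = bitrev32(~ether_crc32_le(ea, ETHER_ADDR_LEN)) >> 26;
        hashes[h >> 5] |= (1 << (h & 0x1f));    /* h is 0..63 */
}
#endif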

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
        struct ifnet * const ifp = &sc->sc_ec.ec_if;
        struct ether_multi *enm;
        struct ether_multistep step;
        struct ethercom *ec = &sc->sc_ec;
        uint32_t hashes[2] = { 0, 0 };
        uint32_t ffilt, h;
        int mcnt;

        KASSERT(mutex_owned(sc->sc_lock));

        ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

        if (ifp->if_flags & IFF_PROMISC) {
                ffilt |= AWIN_GMAC_MAC_FFILT_PR;
                goto special_filter;
        }

        ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

        ETHER_LOCK(ec);
        ec->ec_flags &= ~ETHER_F_ALLMULTI;
        ETHER_FIRST_MULTI(step, ec, enm);
        mcnt = 0;
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                    ETHER_ADDR_LEN) != 0) {
                        ffilt |= AWIN_GMAC_MAC_FFILT_PM;
                        ec->ec_flags |= ETHER_F_ALLMULTI;
                        ETHER_UNLOCK(ec);
                        goto special_filter;
                }

                h = bitrev32(
                    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
                    ) >> 26;
                hashes[h >> 5] |= (1 << (h & 0x1f));

                mcnt++;
                ETHER_NEXT_MULTI(step, enm);
        }
        ETHER_UNLOCK(ec);

        if (mcnt)
                ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
        else
                ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
            hashes[0]);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
            hashes[1]);
        sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
        dwc_gmac_dump_ffilt(sc, ffilt);
#endif
        return;

special_filter:
#ifdef DWC_GMAC_DEBUG
        dwc_gmac_dump_ffilt(sc, ffilt);
#endif
        /* no MAC hashes, ALLMULTI or PROMISC */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
            ffilt);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
            0xffffffff);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
            0xffffffff);
        sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
        uint32_t status, dma_status;
        int rv = 0;

        if (sc->sc_stopping)
                return 0;

        status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
        if (status & AWIN_GMAC_MII_IRQ) {
                (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MII_STATUS);
                rv = 1;
                mii_pollstat(&sc->sc_mii);
        }

        dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_STATUS);

        if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
                rv = 1;

        if (dma_status & GMAC_DMA_INT_TIE)
                dwc_gmac_tx_intr(sc);

        if (dma_status & GMAC_DMA_INT_RIE)
                dwc_gmac_rx_intr(sc);

        /*
         * Check error conditions
         */
        if (dma_status & GMAC_DMA_INT_ERRORS) {
                if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
                dwc_dump_and_abort(sc, "interrupt error condition");
#endif
        }

        rnd_add_uint32(&sc->rnd_source, dma_status);

        /* ack interrupt */
        if (dma_status)
                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

        /*
         * Get more packets
         */
        if (rv)
                if_schedule_deferred_start(&sc->sc_ec.ec_if);

        return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

        desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

        return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
        uint32_t cntl = le32toh(desc->ddesc_cntl1);

        desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
            __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

        return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

        desc->ddesc_status0 = 0;
        desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
        uint32_t cntl = le32toh(desc->ddesc_cntl1);

        desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
        uint32_t cntl = le32toh(desc->ddesc_cntl1);

        desc->ddesc_cntl1 = htole32(cntl |
            DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

        desc->ddesc_status0 = 0;
        /* in the standard format the RX chain bit matches the TX one */
        desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

        return !!(le32toh(desc->ddesc_status0) &
            (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
        uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

        desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
            __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

        return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

        desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
        desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
        uint32_t tdes0 = le32toh(desc->ddesc_status0);

        desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
        uint32_t tdes0 = le32toh(desc->ddesc_status0);

        desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

        desc->ddesc_status0 = 0;
        desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

        return !!(le32toh(desc->ddesc_status0) &
            (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
        aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
        aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
        aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
        aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
        aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
        aprint_normal_dev(sc->sc_dev, "status: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
        aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
        aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
        aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
        aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
        aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
        aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
        int i;

        aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
            sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
        aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
                aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
                    "data: %08x next: %08x\n",
                    i, sc->sc_txq.t_physaddr +
                        i * sizeof(struct dwc_gmac_dev_dmadesc),
                    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
                    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
        }
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
        int i;

        aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
            sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
        aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
                aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
                    "data: %08x next: %08x\n",
                    i, sc->sc_rxq.r_physaddr +
                        i * sizeof(struct dwc_gmac_dev_dmadesc),
                    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
                    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
        }
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
        uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_INTR);
        uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_STATUS);
        char buf[200];

        /* print interrupt state */
        snprintb(buf, sizeof(buf), "\177\20"
            "b\x10""NI\0"
            "b\x0f""AI\0"
            "b\x0e""ER\0"
            "b\x0d""FB\0"
            "b\x0a""ET\0"
            "b\x09""RW\0"
            "b\x08""RS\0"
            "b\x07""RU\0"
            "b\x06""RI\0"
            "b\x05""UN\0"
            "b\x04""OV\0"
            "b\x03""TJ\0"
            "b\x02""TU\0"
            "b\x01""TS\0"
            "b\x00""TI\0"
            "\0", dma_status);
        aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
            status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
        dwc_dump_status(sc);
        dwc_gmac_dump_ffilt(sc,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
        dwc_gmac_dump_dma(sc);
        dwc_gmac_dump_tx_desc(sc);
        dwc_gmac_dump_rx_desc(sc);

        panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
        char buf[200];

        /* print filter setup */
        snprintb(buf, sizeof(buf), "\177\20"
            "b\x1f""RA\0"
            "b\x0a""HPF\0"
            "b\x09""SAF\0"
            "b\x08""SAIF\0"
            "b\x05""DBF\0"
            "b\x04""PM\0"
            "b\x03""DAIF\0"
            "b\x02""HMC\0"
            "b\x01""HUC\0"
            "b\x00""PR\0"
            "\0", ffilt);
        aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif