/* $NetBSD: dwc_gmac.c,v 1.92 2024/08/10 12:11:14 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.92 2024/08/10 12:11:14 skrll Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

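/*
 * The core uses either the "standard" or the "enhanced" DMA descriptor
 * layout; dwc_gmac_attach() probes the HW feature register to pick one,
 * and all descriptor accesses go through one of these method tables.
 */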
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

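/*
 * The RX descriptors occupy the first AWGE_RX_RING_COUNT slots of the
 * shared descriptor memory; the TX descriptors follow immediately after.
 */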
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	const uint32_t ver =
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	const uint32_t snpsver =
	    __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (snpsver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface.
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

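/*
 * Soft-reset the core: set the reset bit in the DMA bus mode register
 * and wait (up to ~300ms) for the hardware to clear it again.
 */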
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

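/*
 * MII access helpers: program the MII address register and busy-wait
 * (up to ~10ms) for the BUSY bit to clear, all under sc_mdio_lock.
 */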
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

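/*
 * Populate the RX ring: one cluster mbuf per descriptor, each mapped
 * for device reads and chained to the following descriptor.
 */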
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, rxringsz);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0),
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

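/*
 * Allocate one contiguous DMA area holding both descriptor rings and
 * divide it between the RX and TX queues.
 */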
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

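/*
 * Sync the TX descriptors in [start, end); when the range wraps around
 * the end of the ring, two separate bus_dmamap_sync() calls are needed.
 */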
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

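/*
 * PHY status change callback: reprogram the MAC configuration and flow
 * control registers according to the media the PHY has negotiated.
 */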
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

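/*
 * Bring the interface up: program DMA bus mode, address and multicast
 * filters and the ring base pointers, then start the RX/TX engines.
 */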
static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting DMA opmode register: %08x\n", opmode);
#endif

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_txbusy = false;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

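/*
 * Move packets from the interface send queue onto the TX ring until
 * the ring is full, then poke the DMA engine to resume transmission.
 */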
static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	if (sc->sc_txbusy)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_txbusy = false;
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

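/*
 * TX completion: walk the ring from t_next, reclaiming descriptors the
 * device has handed back and freeing the transmitted mbufs.
 */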
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 does not need to be a valid descriptor,
		 * this is just a special notion to just sync
		 * a single tx descriptor (i)
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
		    __func__, i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

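/*
 * RX completion: for every descriptor the device has handed back, pass
 * the received packet up the stack and reload the slot with a freshly
 * allocated mbuf (the old buffer is reused if allocation fails).
 */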
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
		    __func__, i);
#endif
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "%s: RX error: status %08x, skipping\n",
			    __func__, le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: device is done with descriptor #%d, len: %d\n",
		    __func__, i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: receiving packet at desc #%d, using mbuf %p\n",
		    __func__, i, data->rd_m);
#endif
		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

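/*
 * Program the hash-based multicast filter.  Falls back to a catch-all
 * hash table when an address range is requested (ALLMULTI), and to the
 * promiscuous filter bit when IFF_PROMISC is set.
 */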
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

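/*
 * Interrupt handler, called from the bus glue: dispatches MII, TX and
 * RX interrupts, counts error conditions and acknowledges the DMA
 * status bits that were found set.
 */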
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}
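
/*
 * Descriptor accessors.  The "owned by device" bit sits in the same
 * position in both layouts; the length and flag fields differ between
 * the standard and the enhanced descriptor format.
 */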
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* print interrupt state */
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e" "daff\0"
			    "f\x10\xe" "frlen\0"
			    "b\x0f" "error\0"
			    "b\x0e" "rxtrunc\0"	/* descriptor error? */
			    "b\x0d" "saff\0"
			    "b\x0c" "giantframe\0"	/* length error? */
			    "b\x0b" "damaged\0"
			    "b\x0a" "vlan\0"
			    "b\x09" "first\0"
			    "b\x08" "last\0"
			    "b\x07" "giant\0"
1716 "b\x06" "collison\0"
1717 "b\x05" "ether\0"
1718 "b\x04" "watchdog\0"
1719 "b\x03" "miierror\0"
1720 "b\x02" "dribbling\0"
1721 "b\x01" "crc\0"
1722 "\0", le32toh(desc->ddesc_status0));
1723 }
1724
1725 aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
1726 "data: %08x next: %08x %s\n",
1727 i, sc->sc_rxq.r_physaddr + i * descsz,
1728 le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1729 le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
1730 sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
1731 }
1732 }
1733
1734 static void
1735 dwc_dump_status(struct dwc_gmac_softc *sc)
1736 {
1737 uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1738 AWIN_GMAC_MAC_INTR);
1739 uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1740 AWIN_GMAC_DMA_STATUS);
1741 char buf[200];
1742
1743 /* print interrupt state */
1744 snprintb(buf, sizeof(buf),
1745 "\177\20"
1746 "b\x1c" "GPI\0"
1747 "b\x1b" "GMC\0"
1748 "b\x1a" "GLI\0"
1749 "f\x17\x3" "EB\0"
1750 "f\x14\x3" "TPS\0"
1751 "f\x11\x3" "RPS\0"
1752 "b\x10" "NI\0"
1753 "b\x0f" "AI\0"
1754 "b\x0e" "ER\0"
1755 "b\x0d" "FB\0"
1756 "b\x0a" "ET\0"
1757 "b\x09" "RW\0"
1758 "b\x08" "RS\0"
1759 "b\x07" "RU\0"
1760 "b\x06" "RI\0"
1761 "b\x05" "UN\0"
1762 "b\x04" "OV\0"
1763 "b\x03" "TJ\0"
1764 "b\x02" "TU\0"
1765 "b\x01" "TS\0"
1766 "b\x00" "TI\0"
1767 "\0", dma_status);
1768 aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1769 status, buf);
1770 }
1771
1772 static void
1773 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1774 {
1775 dwc_dump_status(sc);
1776 dwc_gmac_dump_ffilt(sc,
1777 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1778 dwc_gmac_dump_dma(sc);
1779 dwc_gmac_dump_tx_desc(sc);
1780 dwc_gmac_dump_rx_desc(sc);
1781
1782 panic("%s", msg);
1783 }
1784
static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f" "RA\0"
	    "b\x0a" "HPF\0"
	    "b\x09" "SAF\0"
	    "b\x08" "SAIF\0"
	    "b\x05" "DBF\0"
	    "b\x04" "PM\0"
	    "b\x03" "DAIF\0"
	    "b\x02" "HMC\0"
	    "b\x01" "HUC\0"
	    "b\x00" "PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif