/* $NetBSD: dwc_gmac.c,v 1.94 2024/08/11 12:48:09 riastradh Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

/*
 * Lock order:
 *
 *	IFNET_LOCK -> sc_mcast_lock
 *	IFNET_LOCK -> sc_intr_lock -> {sc_txq.t_mtx, sc_rxq.r_mtx}
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.94 2024/08/11 12:48:09 riastradh Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

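/*
 * The core uses one of two DMA descriptor layouts; dwc_gmac_attach()
 * selects the enhanced variant when the HW feature register advertises
 * it.  All descriptor accesses go through one of these method tables,
 * so the rest of the driver is layout-agnostic.
 */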
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};

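/*
 * Both rings live in a single DMA allocation: the AWGE_RX_RING_COUNT
 * RX descriptors come first, followed by the TX descriptors.  The
 * *_NEXT() macros rely on the ring counts being powers of two.
 */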
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	const uint32_t ver =
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	const uint32_t snpsver =
	    __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO; /* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (snpsver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_stopping = false;
	sc->sc_txbusy = false;

	sc->sc_mcast_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
	sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_intr_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_intr_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

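/*
 * Soft-reset the core: set the reset bit in the DMA bus mode register
 * and wait for the hardware to clear it again (up to 300ms).
 */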
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

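/*
 * MDIO access: encode the PHY address, register number and clock
 * divider into the MII address register together with the BUSY bit,
 * then poll until the hardware clears BUSY; for reads, the result is
 * then available in the MII data register.
 */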
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

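/*
 * Set up the RX ring: allocate one mbuf cluster per slot, chain the
 * descriptors via their "next" pointers and hand all of them to the
 * device, so reception can start as soon as the DMA engine does.
 */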
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, rxringsz);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0),
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

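/*
 * Allocate one DMA-coherent region for all descriptors; the first
 * AWGE_RX_RING_COUNT of them form the RX ring and the remainder the
 * TX ring (see TX_DESC_OFFSET()).
 */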
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

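/*
 * Set up the TX ring: create a DMA map per slot and pre-link the
 * descriptors into a circular chain; buffers are only attached when
 * packets get queued.
 */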
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

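/*
 * Sync the TX descriptors in [start, end); if the range wraps around
 * the end of the ring, split the sync into two bus_dmamap_sync() calls.
 */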
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	uint32_t ffilt;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(ifp == &sc->sc_ec.ec_if);

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	mutex_enter(sc->sc_mcast_lock);
	dwc_gmac_setmulti(sc);
	mutex_exit(sc->sc_mcast_lock);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting DMA opmode register: %08x\n", opmode);
#endif

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = false;
	mutex_exit(sc->sc_intr_lock);

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	KASSERT(if_is_mpsafe(ifp));

	mutex_enter(sc->sc_intr_lock);
	if (!sc->sc_stopping) {
		dwc_gmac_start_locked(ifp);
	}
	mutex_exit(sc->sc_intr_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	KASSERT(mutex_owned(sc->sc_intr_lock));

	mutex_enter(&sc->sc_txq.t_mtx);
	if (sc->sc_txbusy) {
		mutex_exit(&sc->sc_txq.t_mtx);
		return;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));

	ifp->if_flags &= ~IFF_RUNNING;

	mutex_enter(sc->sc_mcast_lock);
	sc->sc_if_flags = ifp->if_flags;
	mutex_exit(sc->sc_mcast_lock);

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = true;
	mutex_exit(sc->sc_intr_lock);

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring: load the mbuf chain into a DMA map, fill one
 * descriptor per segment, and hand ownership of the first descriptor
 * to the device only after the whole chain has been built.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}
1122
1123 /*
1124 * If the interface is up and running, only modify the receive
1125 * filter when setting promiscuous or debug mode. Otherwise fall
1126 * through to ether_ioctl, which will reset the chip.
1127 */
1128 static int
1129 dwc_gmac_ifflags_cb(struct ethercom *ec)
1130 {
1131 struct ifnet * const ifp = &ec->ec_if;
1132 struct dwc_gmac_softc * const sc = ifp->if_softc;
1133 int ret = 0;
1134
1135 KASSERT(IFNET_LOCKED(ifp));
1136 mutex_enter(sc->sc_mcast_lock);
1137
1138 u_short change = ifp->if_flags ^ sc->sc_if_flags;
1139 sc->sc_if_flags = ifp->if_flags;
1140
1141 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1142 ret = ENETRESET;
1143 } else if ((change & IFF_PROMISC) != 0) {
1144 dwc_gmac_setmulti(sc);
1145 }
1146
1147 mutex_exit(sc->sc_mcast_lock);
1148
1149 return ret;
1150 }
1151
1152 static int
1153 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1154 {
1155 struct dwc_gmac_softc * const sc = ifp->if_softc;
1156 int error = 0;
1157
1158 switch (cmd) {
1159 case SIOCADDMULTI:
1160 case SIOCDELMULTI:
1161 break;
1162 default:
1163 KASSERT(IFNET_LOCKED(ifp));
1164 }
1165
1166 const int s = splnet();
1167 error = ether_ioctl(ifp, cmd, data);
1168 splx(s);
1169
1170 if (error == ENETRESET) {
1171 error = 0;
1172 if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
1173 mutex_enter(sc->sc_mcast_lock);
1174 if (sc->sc_if_flags & IFF_RUNNING) {
1175 /*
1176 * Multicast list has changed; set the hardware
1177 * filter accordingly.
1178 */
1179 dwc_gmac_setmulti(sc);
1180 }
1181 mutex_exit(sc->sc_mcast_lock);
1182 }
1183 }
1184
1185 return error;
1186 }
1187
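/*
 * TX completion: starting at t_next, reclaim all descriptors the
 * device has handed back, free the transmitted mbufs and return the
 * slots to t_queued.
 */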
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 does not need to be a valid descriptor;
		 * this is just a convenient way to sync a single
		 * TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
		    __func__, i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

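/*
 * RX completion: walk the ring from r_cur, replace the cluster of each
 * finished slot before passing the packet up (dropping the packet and
 * reusing the old cluster if allocation fails) and hand the slot back
 * to the device.
 */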
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
		    __func__, i);
#endif
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "%s: RX error: status %08x, skipping\n",
			    __func__, le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: device is done with descriptor #%d, len: %d\n",
		    __func__, i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: receiving packet at desc #%d, using mbuf %p\n",
		    __func__, i, data->rd_m);
#endif
		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

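/*
 * Program the multicast filter.  The hardware hashes each address and
 * uses the result as a bit index into the 64-bit HTHIGH/HTLOW table;
 * address ranges cannot be expressed this way, so they (like
 * IFF_PROMISC) fall back to a catch-all filter setup.
 */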
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_mcast_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (sc->sc_if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
}

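/*
 * Interrupt handler: check MAC and DMA interrupt status, dispatch TX
 * and RX completion, acknowledge the DMA status bits and schedule a
 * deferred if_start when anything was handled.
 */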
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	mutex_enter(sc->sc_intr_lock);
	if (sc->sc_stopping) {
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	mutex_exit(sc->sc_intr_lock);

	return rv;
}

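/*
 * Descriptor accessors.  The OWN bit passes a descriptor between
 * driver and device: while it is set, the device owns the descriptor
 * and the driver must not touch it.  In the standard layout the chain
 * bit sits at the same position in the TX and RX control word, which
 * is why the RX functions below can reuse DDESC_CNTL_TXCHAIN.
 */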
static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* print interrupt state */
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e" "daff\0"
			    "f\x10\xe" "frlen\0"
			    "b\x0f" "error\0"
			    "b\x0e" "rxtrunc\0"	/* descriptor error? */
			    "b\x0d" "saff\0"
			    "b\x0c" "giantframe\0"	/* length error? */
			    "b\x0b" "damaged\0"
			    "b\x0a" "vlan\0"
			    "b\x09" "first\0"
			    "b\x08" "last\0"
			    "b\x07" "giant\0"
			    "b\x06" "collision\0"
			    "b\x05" "ether\0"
			    "b\x04" "watchdog\0"
			    "b\x03" "miierror\0"
			    "b\x02" "dribbling\0"
			    "b\x01" "crc\0"
			    "\0", le32toh(desc->ddesc_status0));
		}

		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x %s\n",
		    i, sc->sc_rxq.r_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
		    sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf),
	    "\177\20"
	    "b\x1c" "GPI\0"
	    "b\x1b" "GMC\0"
	    "b\x1a" "GLI\0"
	    "f\x17\x3" "EB\0"
	    "f\x14\x3" "TPS\0"
	    "f\x11\x3" "RPS\0"
	    "b\x10" "NI\0"
	    "b\x0f" "AI\0"
	    "b\x0e" "ER\0"
	    "b\x0d" "FB\0"
	    "b\x0a" "ET\0"
	    "b\x09" "RW\0"
	    "b\x08" "RS\0"
	    "b\x07" "RU\0"
	    "b\x06" "RI\0"
	    "b\x05" "UN\0"
	    "b\x04" "OV\0"
	    "b\x03" "TJ\0"
	    "b\x02" "TU\0"
	    "b\x01" "TS\0"
	    "b\x00" "TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif