/* $NetBSD: dwc_gmac.c,v 1.93 2024/08/10 12:16:47 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation seems not to be available; the marketing product
 * documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.93 2024/08/10 12:16:47 skrll Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};
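
/*
 * All descriptor accesses go through one of the two method tables
 * above.  dwc_gmac_attach() selects desc_methods_enhanced when the
 * DMA hardware feature register advertises the enhanced descriptor
 * layout and desc_methods_standard otherwise, so the rest of the
 * driver stays layout-agnostic, e.g.:
 *
 *	sc->sc_descm->rx_init_flags(desc);
 *	sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
 *	sc->sc_descm->rx_set_owned_by_dev(desc);
 */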


#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT + (N)) \
				    * sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N) + 1) & (AWGE_TX_RING_COUNT - 1))

#define RX_DESC_OFFSET(N)	((N) * sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N) + 1) & (AWGE_RX_RING_COUNT - 1))
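
/*
 * The wrap macros above assume both ring sizes are powers of two, so
 * that "((N) + 1) & (COUNT - 1)" rolls over to 0 at the end of the
 * ring.  A compile-time check of that assumption (a sketch, not part
 * of the original code) would be:
 *
 *	__CTASSERT(powerof2(AWGE_RX_RING_COUNT));
 *	__CTASSERT(powerof2(AWGE_TX_RING_COUNT));
 */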


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	const uint32_t ver =
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	const uint32_t snpsver =
	    __SHIFTOUT(ver, AWIN_GMAC_MAC_VERSION_SNPSVER_MASK);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", snpsver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (snpsver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}

	if (sizeof(bus_addr_t) > 4) {
		int error = bus_dmatag_subregion(sc->sc_dmat, 0, __MASK(32),
		    &sc->sc_dmat, BUS_DMA_WAITOK);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "failed to create DMA subregion\n");
			return ENOMEM;
		}
	}

	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}
	if (hwft & GMAC_DMA_FEAT_RMON) {
		uint32_t val;

		/* Mask all MMC interrupts */
		val = 0xffffffff;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_RX_INT_MSK, val);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    GMAC_MMC_TX_INT_MSK, val);
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_stopping = false;
	sc->sc_txbusy = false;

	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	sc->sc_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_intr_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_intr_lock);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}



static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | ((uint32_t)enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}
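
/*
 * Byte-order illustration (not from the original source): for the
 * address 00:11:22:33:44:55 the code above stores
 *
 *	AWIN_GMAC_MAC_ADDR0LO = 0x33221100
 *	AWIN_GMAC_MAC_ADDR0HI = 0x00005544
 *
 * which is the inverse of the decoding done in dwc_gmac_attach().
 */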
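/*
 * MII/MDIO access: both accessors program AWIN_GMAC_MAC_MIIADDR with
 * the PHY address, register number, clock divider and the BUSY bit,
 * then poll until the hardware clears BUSY (up to 1000 * 10us).
 * A hypothetical caller, assuming a PHY at address 0:
 *
 *	uint16_t bmsr;
 *	if (dwc_gmac_miibus_read_reg(self, 0, MII_BMSR, &bmsr) == 0)
 *		... BMSR contents now in bmsr ...
 */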
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t rxringsz = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, rxringsz);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0),
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}
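
/*
 * RX descriptor life cycle: each descriptor set up above points at a
 * freshly loaded mbuf cluster and is handed to the device via the OWN
 * bit; dwc_gmac_rx_intr() later reclaims it, swaps in a new cluster
 * and hands the descriptor back.  The ddesc_next chain makes the ring
 * explicit for the chained-descriptor DMA mode.
 */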

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t ringsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, ringsize, 1, ringsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}
	error = bus_dmamem_alloc(sc->sc_dmat, ringsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    ringsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    ringsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT * sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    * TX_NEXT(i));
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT) - TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0),
		    ops);
	}
}
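
/*
 * Worked example (illustrative): if AWGE_TX_RING_COUNT were 32, a
 * call with start == 30 and end == 2 wraps and is split into two
 * bus_dmamap_sync() calls, one covering descriptors 30-31 and one
 * covering descriptors 0-1.
 */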

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

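/*
 * Locking model: sc_core_lock serializes init/stop and ioctl-driven
 * reconfiguration, sc_intr_lock gates the interrupt path against
 * sc_stopping, and the per-ring t_mtx/r_mtx protect ring indices and
 * buffers.  The IFNET_LOCKED() assertions below document where the
 * ifnet lock must additionally be held by the caller.
 */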
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	KASSERT(IFNET_LOCKED(ifp));

	mutex_enter(sc->sc_core_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_core_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	uint32_t ffilt;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(sc->sc_core_lock));
	KASSERT(ifp == &sc->sc_ec.ec_if);

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE,
	    opmode);
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting DMA opmode register: %08x\n", opmode);
#endif

	ifp->if_flags |= IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = false;

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);
	mutex_exit(sc->sc_intr_lock);

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	KASSERT(if_is_mpsafe(ifp));

	mutex_enter(sc->sc_intr_lock);
	if (!sc->sc_stopping) {
		dwc_gmac_start_locked(ifp);
	}
	mutex_exit(sc->sc_intr_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	KASSERT(mutex_owned(sc->sc_intr_lock));

	mutex_enter(&sc->sc_txq.t_mtx);
	if (sc->sc_txbusy) {
		mutex_exit(&sc->sc_txq.t_mtx);
		return;
	}

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			sc->sc_txbusy = true;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			sc->sc_txbusy = true;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	mutex_enter(sc->sc_core_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_core_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;

	ASSERT_SLEEPABLE();
	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(sc->sc_core_lock));

	mutex_enter(sc->sc_intr_lock);
	sc->sc_stopping = true;

	mutex_enter(&sc->sc_txq.t_mtx);
	sc->sc_txbusy = false;
	mutex_exit(&sc->sc_txq.t_mtx);

	mutex_exit(sc->sc_intr_lock);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->sc_if_flags = ifp->if_flags;
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	/* sync the packet buffer */
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* sync the new descriptors - ownership not transferred yet */
	dwc_gmac_txdesc_sync(sc, first, sc->sc_txq.t_cur,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet * const ifp = &ec->ec_if;
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int ret = 0;

	KASSERT(IFNET_LOCKED(ifp));
	mutex_enter(sc->sc_core_lock);

	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
	} else if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}

	mutex_exit(sc->sc_core_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	int error = 0;

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		KASSERT(IFNET_LOCKED(ifp));
	}

	const int s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	splx(s);

	if (error == ENETRESET) {
		error = 0;
		if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
			mutex_enter(sc->sc_core_lock);
			if (sc->sc_if_flags & IFF_RUNNING) {
				/*
				 * Multicast list has changed; set the hardware
				 * filter accordingly.
				 */
				dwc_gmac_setmulti(sc);
			}
			mutex_exit(sc->sc_core_lock);
		}
	}

	return error;
}

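/*
 * TX completion: walk the ring from t_next, reclaiming descriptors
 * the device no longer owns.  Note that t_queued counts DMA segments
 * rather than packets: dwc_gmac_queue() increments it once per
 * segment, and the loop below decrements it by each completed map's
 * dm_nsegs.
 */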
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: checking desc #%d (t_queued: %d)\n", __func__,
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i + 1 need not be a valid descriptor index; it is simply
		 * how dwc_gmac_txdesc_sync() is told to sync only the
		 * single descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i + 1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: done with packet at desc #%d, freeing mbuf %p\n",
		    __func__, i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_txbusy = false;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

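/*
 * RX uses a replace-and-requeue strategy: a replacement cluster is
 * allocated and DMA-loaded before the received one is passed up; if
 * either step fails the packet is dropped and the old buffer is
 * recycled, so the ring never loses a descriptor.
 */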
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "%s: checking desc #%d\n",
		    __func__, i);
#endif
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "%s: RX error: status %08x, skipping\n",
			    __func__, le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: device is done with descriptor #%d, len: %d\n",
		    __func__, i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "%s: receiving packet at desc #%d, using mbuf %p\n",
		    __func__, i, data->rd_m);
#endif
		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

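/*
 * Program the 64-bit multicast hash filter: the hash index is the top
 * 6 bits of the inverted big-endian CRC32 of the address, with bit 5
 * selecting HTHIGH vs HTLOW.  Address ranges (addrlo != addrhi) cannot
 * be hashed, so they force ALLMULTI, implemented below as all-ones
 * hash tables plus the PM bit.
 */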
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_core_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (sc->sc_if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

		h = ~ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = ifp->if_flags;
}

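/*
 * Main interrupt handler, to be called from the bus front end's
 * interrupt handler.  Returns nonzero when the interrupt was ours;
 * in that case deferred if_start processing is scheduled, so new
 * packets are picked up outside hard interrupt context.
 */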
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	mutex_enter(sc->sc_intr_lock);
	if (sc->sc_stopping) {
		mutex_exit(sc->sc_intr_lock);
		return 0;
	}

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		if_statinc(&sc->sc_ec.ec_if, if_oerrors);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	mutex_exit(sc->sc_intr_lock);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}
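
/*
 * The "enhanced" descriptor helpers below differ from the standard
 * ones mainly in bit placement: the TX control bits (chain, first,
 * last, interrupt-on-completion) live in ddesc_status0 (TDES0)
 * instead of ddesc_cntl1, and the length fields use a different
 * mask; the method tables hide this from the rest of the driver.
 */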

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, " status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int en.: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, " cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, " cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur txb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rxb: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0), AWGE_TX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i * descsz,
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	const size_t descsz = sizeof(struct dwc_gmac_dev_dmadesc);

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    RX_DESC_OFFSET(0), AWGE_RX_RING_COUNT * descsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (size_t i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		char buf[200];

		if (!sc->sc_descm->rx_is_owned_by_dev(desc)) {
			/* print interrupt state */
			snprintb(buf, sizeof(buf),
			    "\177\20"
			    "b\x1e" "daff\0"
			    "f\x10\xe" "frlen\0"
			    "b\x0f" "error\0"
			    "b\x0e" "rxtrunc\0"	/* descriptor error? */
			    "b\x0d" "saff\0"
			    "b\x0c" "giantframe\0"	/* length error? */
			    "b\x0b" "damaged\0"
			    "b\x0a" "vlan\0"
			    "b\x09" "first\0"
			    "b\x08" "last\0"
			    "b\x07" "giant\0"
1739 "b\x06" "collison\0"
1740 "b\x05" "ether\0"
1741 "b\x04" "watchdog\0"
1742 "b\x03" "miierror\0"
1743 "b\x02" "dribbling\0"
1744 "b\x01" "crc\0"
1745 "\0", le32toh(desc->ddesc_status0));
1746 }
1747
1748 aprint_normal("#%3zu (%08lx): status: %08x cntl: %08x "
1749 "data: %08x next: %08x %s\n",
1750 i, sc->sc_rxq.r_physaddr + i * descsz,
1751 le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1752 le32toh(desc->ddesc_data), le32toh(desc->ddesc_next),
1753 sc->sc_descm->rx_is_owned_by_dev(desc) ? "" : buf);
1754 }
1755 }
1756
1757 static void
1758 dwc_dump_status(struct dwc_gmac_softc *sc)
1759 {
1760 uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1761 AWIN_GMAC_MAC_INTR);
1762 uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1763 AWIN_GMAC_DMA_STATUS);
1764 char buf[200];
1765
1766 /* print interrupt state */
1767 snprintb(buf, sizeof(buf),
1768 "\177\20"
1769 "b\x1c" "GPI\0"
1770 "b\x1b" "GMC\0"
1771 "b\x1a" "GLI\0"
1772 "f\x17\x3" "EB\0"
1773 "f\x14\x3" "TPS\0"
1774 "f\x11\x3" "RPS\0"
1775 "b\x10" "NI\0"
1776 "b\x0f" "AI\0"
1777 "b\x0e" "ER\0"
1778 "b\x0d" "FB\0"
1779 "b\x0a" "ET\0"
1780 "b\x09" "RW\0"
1781 "b\x08" "RS\0"
1782 "b\x07" "RU\0"
1783 "b\x06" "RI\0"
1784 "b\x05" "UN\0"
1785 "b\x04" "OV\0"
1786 "b\x03" "TJ\0"
1787 "b\x02" "TU\0"
1788 "b\x01" "TS\0"
1789 "b\x00" "TI\0"
1790 "\0", dma_status);
1791 aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1792 status, buf);
1793 }
1794
1795 static void
1796 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1797 {
1798 dwc_dump_status(sc);
1799 dwc_gmac_dump_ffilt(sc,
1800 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1801 dwc_gmac_dump_dma(sc);
1802 dwc_gmac_dump_tx_desc(sc);
1803 dwc_gmac_dump_rx_desc(sc);
1804
1805 panic("%s", msg);
1806 }
1807
1808 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1809 {
1810 char buf[200];
1811
1812 /* print filter setup */
1813 snprintb(buf, sizeof(buf), "\177\20"
1814 "b\x1f""RA\0"
1815 "b\x0a""HPF\0"
1816 "b\x09""SAF\0"
1817 "b\x08""SAIF\0"
1818 "b\x05""DBF\0"
1819 "b\x04""PM\0"
1820 "b\x03""DAIF\0"
1821 "b\x02""HMC\0"
1822 "b\x01""HUC\0"
1823 "b\x00""PR\0"
1824 "\0", ffilt);
1825 aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1826 }
1827 #endif
1828