/* $NetBSD: dwc_gmac.c,v 1.78 2022/09/18 18:26:53 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */
41
42 #include <sys/cdefs.h>
43
__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.78 2022/09/18 18:26:53 thorpej Exp $");
45
46 /* #define DWC_GMAC_DEBUG 1 */
47
48 #ifdef _KERNEL_OPT
49 #include "opt_inet.h"
50 #include "opt_net_mpsafe.h"
51 #endif
52
53 #include <sys/param.h>
54 #include <sys/bus.h>
55 #include <sys/device.h>
56 #include <sys/intr.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
59 #include <sys/cprng.h>
60 #include <sys/rndsource.h>
61
62 #include <net/if.h>
63 #include <net/if_ether.h>
64 #include <net/if_media.h>
65 #include <net/bpf.h>
66 #ifdef INET
67 #include <netinet/if_inarp.h>
68 #endif
69
70 #include <dev/mii/miivar.h>
71
72 #include <dev/ic/dwc_gmac_reg.h>
73 #include <dev/ic/dwc_gmac_var.h>
74
75 static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
76 static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
77 static void dwc_gmac_miibus_statchg(struct ifnet *);
78
79 static int dwc_gmac_reset(struct dwc_gmac_softc *);
80 static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
81 static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
82 static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
83 static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
84 static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
85 static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
86 static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
87 static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
88 static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
89 static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
90 static int dwc_gmac_init(struct ifnet *);
91 static int dwc_gmac_init_locked(struct ifnet *);
92 static void dwc_gmac_stop(struct ifnet *, int);
93 static void dwc_gmac_stop_locked(struct ifnet *, int);
94 static void dwc_gmac_start(struct ifnet *);
95 static void dwc_gmac_start_locked(struct ifnet *);
96 static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
97 static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
98 static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
99 static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
100 static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
101 static int dwc_gmac_ifflags_cb(struct ethercom *);
102 static uint32_t bitrev32(uint32_t);
103 static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
104 static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
105 static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
106 static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
107 static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
108 static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
109 static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
110 static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
111 static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
112 static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
113 static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
114 static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
115 static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
116 static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
117 static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
118 static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
119
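/*
 * The core uses either the "standard" or the "enhanced" DMA descriptor
 * layout, depending on what the DMA HW feature register advertises.
 * All descriptor manipulation goes through one of the two method
 * tables below, selected at attach time via sc->sc_descm.
 */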
120 static const struct dwc_gmac_desc_methods desc_methods_standard = {
121 .tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
122 .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
123 .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
124 .tx_set_len = dwc_gmac_desc_std_set_len,
125 .tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
126 .tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
127 .rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
128 .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
129 .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
130 .rx_set_len = dwc_gmac_desc_std_set_len,
131 .rx_get_len = dwc_gmac_desc_std_get_len,
132 .rx_has_error = dwc_gmac_desc_std_rx_has_error
133 };
134
135 static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
136 .tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
137 .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
138 .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
139 .tx_set_len = dwc_gmac_desc_enh_set_len,
140 .tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
141 .tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
142 .rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
143 .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
144 .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
145 .rx_set_len = dwc_gmac_desc_enh_set_len,
146 .rx_get_len = dwc_gmac_desc_enh_get_len,
147 .rx_has_error = dwc_gmac_desc_enh_rx_has_error
148 };
149
150
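/*
 * RX and TX descriptors share a single DMA allocation: the first
 * AWGE_RX_RING_COUNT descriptors form the RX ring, the TX ring follows.
 * The *_NEXT() macros advance a ring index with wrap-around and rely on
 * the ring sizes being powers of two.
 */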
151 #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
152 *sizeof(struct dwc_gmac_dev_dmadesc))
153 #define TX_NEXT(N) (((N)+1) & (AWGE_TX_RING_COUNT-1))
154
155 #define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
156 #define RX_NEXT(N) (((N)+1) & (AWGE_RX_RING_COUNT-1))
157
158
159
160 #define GMAC_DEF_DMA_INT_MASK (GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
161 GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
162 GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
163
164 #define GMAC_DMA_INT_ERRORS (GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
165 GMAC_DMA_INT_FBE | \
166 GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
167 GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
168 GMAC_DMA_INT_TJE)
169
170 #define AWIN_DEF_MAC_INTRMASK \
171 (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
172 AWIN_GMAC_MAC_INT_LINKCHG)
173
174 #ifdef DWC_GMAC_DEBUG
175 static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
176 static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
177 static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
178 static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
179 static void dwc_dump_status(struct dwc_gmac_softc *);
180 static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
181 #endif
182
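/*
 * Attach the core: determine the MAC address, reset the chip, allocate
 * the DMA descriptor rings and RX buffers, attach the MII bus and the
 * network interface, and finally unmask MAC and DMA interrupts.
 */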
183 int
184 dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
185 {
186 uint8_t enaddr[ETHER_ADDR_LEN];
187 uint32_t maclo, machi, ver, hwft;
188 struct mii_data * const mii = &sc->sc_mii;
189 struct ifnet * const ifp = &sc->sc_ec.ec_if;
190 prop_dictionary_t dict;
191
192 mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
193 sc->sc_mii_clk = mii_clk & 7;
194
195 dict = device_properties(sc->sc_dev);
196 prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
197 if (ea != NULL) {
198 /*
199 * If the MAC address is overridden by a device property,
200 * use that.
201 */
202 KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
203 KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
204 memcpy(enaddr, prop_data_value(ea), ETHER_ADDR_LEN);
205 } else {
206 /*
		 * If we did not get an externally configured address,
208 * try to read one from the current filter setup,
209 * before resetting the chip.
210 */
211 maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
212 AWIN_GMAC_MAC_ADDR0LO);
213 machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
214 AWIN_GMAC_MAC_ADDR0HI);
215
216 if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
217 /* fake MAC address */
218 maclo = 0x00f2 | (cprng_strong32() << 16);
219 machi = cprng_strong32();
220 }
221
222 enaddr[0] = maclo & 0x0ff;
223 enaddr[1] = (maclo >> 8) & 0x0ff;
224 enaddr[2] = (maclo >> 16) & 0x0ff;
225 enaddr[3] = (maclo >> 24) & 0x0ff;
226 enaddr[4] = machi & 0x0ff;
227 enaddr[5] = (machi >> 8) & 0x0ff;
228 }
229
230 ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
231 aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);
232
233 /*
234 * Init chip and do initial setup
235 */
236 if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
238 dwc_gmac_write_hwaddr(sc, enaddr);
239 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
240 ether_sprintf(enaddr));
241
242 hwft = 0;
243 if (ver >= 0x35) {
244 hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
245 AWIN_GMAC_DMA_HWFEATURES);
246 aprint_normal_dev(sc->sc_dev,
247 "HW feature mask: %x\n", hwft);
248 }
249 if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
250 aprint_normal_dev(sc->sc_dev,
251 "Using enhanced descriptor format\n");
252 sc->sc_descm = &desc_methods_enhanced;
253 } else {
254 sc->sc_descm = &desc_methods_standard;
255 }
256 if (hwft & GMAC_DMA_FEAT_RMON) {
257 uint32_t val;
258
259 /* Mask all MMC interrupts */
260 val = 0xffffffff;
261 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
262 GMAC_MMC_RX_INT_MSK, val);
263 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
264 GMAC_MMC_TX_INT_MSK, val);
265 }
266
267 /*
268 * Allocate Tx and Rx rings
269 */
270 if (dwc_gmac_alloc_dma_rings(sc) != 0) {
271 aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
272 goto fail;
273 }
274
275 if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
276 aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
277 goto fail;
278 }
279
280 if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
281 aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
282 goto fail;
283 }
284
285 sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
286 mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
287 mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
288
289 /*
290 * Prepare interface data
291 */
292 ifp->if_softc = sc;
293 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
294 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
295 #ifdef DWCGMAC_MPSAFE
296 ifp->if_extflags = IFEF_MPSAFE;
297 #endif
298 ifp->if_ioctl = dwc_gmac_ioctl;
299 ifp->if_start = dwc_gmac_start;
300 ifp->if_init = dwc_gmac_init;
301 ifp->if_stop = dwc_gmac_stop;
302 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
303 IFQ_SET_READY(&ifp->if_snd);
304
305 /*
306 * Attach MII subdevices
307 */
308 sc->sc_ec.ec_mii = &sc->sc_mii;
309 ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
310 mii->mii_ifp = ifp;
311 mii->mii_readreg = dwc_gmac_miibus_read_reg;
312 mii->mii_writereg = dwc_gmac_miibus_write_reg;
313 mii->mii_statchg = dwc_gmac_miibus_statchg;
314 mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
315 MIIF_DOPAUSE);
316
317 if (LIST_EMPTY(&mii->mii_phys)) {
318 aprint_error_dev(sc->sc_dev, "no PHY found!\n");
319 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
320 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
321 } else {
322 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
323 }
324
325 /*
326 * We can support 802.1Q VLAN-sized frames.
327 */
328 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
329
	/*
	 * Ready, attach the interface.
	 */
334 if_initialize(ifp);
335 sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
336 if_deferred_start_init(ifp, NULL);
337 ether_ifattach(ifp, enaddr);
338 ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
339 if_register(ifp);
340 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
341 RND_TYPE_NET, RND_FLAG_DEFAULT);
342
343 /*
344 * Enable interrupts
345 */
346 mutex_enter(sc->sc_lock);
347 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
348 AWIN_DEF_MAC_INTRMASK);
349 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
350 GMAC_DEF_DMA_INT_MASK);
351 mutex_exit(sc->sc_lock);
352
353 return 0;
354
355 fail:
356 dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
357 dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
358 dwc_gmac_free_dma_rings(sc);
359 mutex_destroy(&sc->sc_mdio_lock);
360
361 return ENXIO;
362 }
363
364
365
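/*
 * Soft-reset the core by setting GMAC_BUSMODE_RESET and polling until
 * the hardware clears the bit again (or the loop times out).
 */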
366 static int
367 dwc_gmac_reset(struct dwc_gmac_softc *sc)
368 {
369 size_t cnt;
370 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
371 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
372 | GMAC_BUSMODE_RESET);
373 for (cnt = 0; cnt < 30000; cnt++) {
374 if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
375 & GMAC_BUSMODE_RESET) == 0)
376 return 0;
377 delay(10);
378 }
379
380 aprint_error_dev(sc->sc_dev, "reset timed out\n");
381 return EIO;
382 }
383
384 static void
385 dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
386 uint8_t enaddr[ETHER_ADDR_LEN])
387 {
388 uint32_t hi, lo;
389
390 hi = enaddr[4] | (enaddr[5] << 8);
391 lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
392 | ((uint32_t)enaddr[3] << 24);
393 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
394 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
395 }
396
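/*
 * MDIO access: program AWIN_GMAC_MAC_MIIADDR (PHY address, register,
 * MDC clock divider, BUSY bit) and poll until BUSY clears.  Reads then
 * fetch the result from AWIN_GMAC_MAC_MIIDATA; writes deposit the value
 * there before starting the cycle.
 */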
397 static int
398 dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
399 {
400 struct dwc_gmac_softc * const sc = device_private(self);
401 uint16_t mii;
402 size_t cnt;
403
404 mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
405 | __SHIFTIN(reg, GMAC_MII_REG_MASK)
406 | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
407 | GMAC_MII_BUSY;
408
409 mutex_enter(&sc->sc_mdio_lock);
410 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
411
412 for (cnt = 0; cnt < 1000; cnt++) {
413 if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
414 AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
415 *val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
416 AWIN_GMAC_MAC_MIIDATA);
417 break;
418 }
419 delay(10);
420 }
421
422 mutex_exit(&sc->sc_mdio_lock);
423
424 if (cnt >= 1000)
425 return ETIMEDOUT;
426
427 return 0;
428 }
429
430 static int
431 dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
432 {
433 struct dwc_gmac_softc * const sc = device_private(self);
434 uint16_t mii;
435 size_t cnt;
436
437 mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
438 | __SHIFTIN(reg, GMAC_MII_REG_MASK)
439 | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
440 | GMAC_MII_BUSY | GMAC_MII_WRITE;
441
442 mutex_enter(&sc->sc_mdio_lock);
443 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
444 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
445
446 for (cnt = 0; cnt < 1000; cnt++) {
447 if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
448 AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
449 break;
450 delay(10);
451 }
452
453 mutex_exit(&sc->sc_mdio_lock);
454
455 if (cnt >= 1000)
456 return ETIMEDOUT;
457
458 return 0;
459 }
460
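/*
 * Populate the RX ring: allocate and DMA-map one mbuf cluster per
 * descriptor, chain the descriptors via ddesc_next and hand them all
 * to the device.  The descriptor memory itself was set up by
 * dwc_gmac_alloc_dma_rings().
 */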
461 static int
462 dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
463 struct dwc_gmac_rx_ring *ring)
464 {
465 struct dwc_gmac_rx_data *data;
466 bus_addr_t physaddr;
467 const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
468 int error, i, next;
469
470 ring->r_cur = ring->r_next = 0;
471 memset(ring->r_desc, 0, descsize);
472
473 /*
474 * Pre-allocate Rx buffers and populate Rx ring.
475 */
476 for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
477 struct dwc_gmac_dev_dmadesc *desc;
478
479 data = &sc->sc_rxq.r_data[i];
480
481 MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
482 if (data->rd_m == NULL) {
483 aprint_error_dev(sc->sc_dev,
484 "could not allocate rx mbuf #%d\n", i);
485 error = ENOMEM;
486 goto fail;
487 }
488 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
489 MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
490 if (error != 0) {
491 aprint_error_dev(sc->sc_dev,
492 "could not create DMA map\n");
493 data->rd_map = NULL;
494 goto fail;
495 }
496 MCLGET(data->rd_m, M_DONTWAIT);
497 if (!(data->rd_m->m_flags & M_EXT)) {
498 aprint_error_dev(sc->sc_dev,
499 "could not allocate mbuf cluster #%d\n", i);
500 error = ENOMEM;
501 goto fail;
502 }
503 data->rd_m->m_len = data->rd_m->m_pkthdr.len
504 = data->rd_m->m_ext.ext_size;
505 if (data->rd_m->m_len > AWGE_MAX_PACKET) {
506 data->rd_m->m_len = data->rd_m->m_pkthdr.len
507 = AWGE_MAX_PACKET;
508 }
509
510 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
511 data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
512 if (error != 0) {
513 aprint_error_dev(sc->sc_dev,
514 "could not load rx buf DMA map #%d", i);
515 goto fail;
516 }
517 bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
518 data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
519 physaddr = data->rd_map->dm_segs[0].ds_addr;
520
521 desc = &sc->sc_rxq.r_desc[i];
522 desc->ddesc_data = htole32(physaddr);
523 next = RX_NEXT(i);
524 desc->ddesc_next = htole32(ring->r_physaddr
525 + next * sizeof(*desc));
526 sc->sc_descm->rx_init_flags(desc);
527 sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
528 sc->sc_descm->rx_set_owned_by_dev(desc);
529 }
530
531 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
532 AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
533 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
534 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
535 ring->r_physaddr);
536
537 return 0;
538
539 fail:
540 dwc_gmac_free_rx_ring(sc, ring);
541 return error;
542 }
543
544 static void
545 dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
546 struct dwc_gmac_rx_ring *ring)
547 {
548 struct dwc_gmac_dev_dmadesc *desc;
549 struct dwc_gmac_rx_data *data;
550 int i;
551
552 mutex_enter(&ring->r_mtx);
553 for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
554 desc = &sc->sc_rxq.r_desc[i];
555 data = &sc->sc_rxq.r_data[i];
556 sc->sc_descm->rx_init_flags(desc);
557 sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
558 sc->sc_descm->rx_set_owned_by_dev(desc);
559 }
560
561 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
562 AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
563 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
564
565 ring->r_cur = ring->r_next = 0;
566 /* reset DMA address to start of ring */
567 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
568 sc->sc_rxq.r_physaddr);
569 mutex_exit(&ring->r_mtx);
570 }
571
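/*
 * Allocate one contiguous, DMA-coherent block holding all RX and TX
 * descriptors and split it between the two rings (RX first, then TX;
 * see TX_DESC_OFFSET()).
 */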
572 static int
573 dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
574 {
575 const size_t descsize = AWGE_TOTAL_RING_COUNT *
576 sizeof(struct dwc_gmac_dev_dmadesc);
577 int error, nsegs;
578 void *rings;
579
580 error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
581 BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
582 if (error != 0) {
583 aprint_error_dev(sc->sc_dev,
584 "could not create desc DMA map\n");
585 sc->sc_dma_ring_map = NULL;
586 goto fail;
587 }
588
589 error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
590 &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT |BUS_DMA_COHERENT);
591 if (error != 0) {
592 aprint_error_dev(sc->sc_dev,
593 "could not map DMA memory\n");
594 goto fail;
595 }
596
597 error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
598 descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
599 if (error != 0) {
600 aprint_error_dev(sc->sc_dev,
601 "could not allocate DMA memory\n");
602 goto fail;
603 }
604
605 error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
606 descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
607 if (error != 0) {
608 aprint_error_dev(sc->sc_dev,
609 "could not load desc DMA map\n");
610 goto fail;
611 }
612
613 /* give first AWGE_RX_RING_COUNT to the RX side */
614 sc->sc_rxq.r_desc = rings;
615 sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
616
617 /* and next rings to the TX side */
618 sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
619 sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
620 AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
621
622 return 0;
623
624 fail:
625 dwc_gmac_free_dma_rings(sc);
626 return error;
627 }
628
629 static void
630 dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
631 {
632 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
633 sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
634 bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
635 bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
636 AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
637 bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
638 }
639
640 static void
641 dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
642 {
643 struct dwc_gmac_rx_data *data;
644 int i;
645
646 if (ring->r_desc == NULL)
647 return;
648
649
650 for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
651 data = &ring->r_data[i];
652
653 if (data->rd_map != NULL) {
654 bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
655 AWGE_RX_RING_COUNT
656 *sizeof(struct dwc_gmac_dev_dmadesc),
657 BUS_DMASYNC_POSTREAD);
658 bus_dmamap_unload(sc->sc_dmat, data->rd_map);
659 bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
660 }
661 if (data->rd_m != NULL)
662 m_freem(data->rd_m);
663 }
664 }
665
666 static int
667 dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
668 struct dwc_gmac_tx_ring *ring)
669 {
670 int i, error = 0;
671
672 ring->t_queued = 0;
673 ring->t_cur = ring->t_next = 0;
674
675 memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
676 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
677 TX_DESC_OFFSET(0),
678 AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
679 BUS_DMASYNC_POSTWRITE);
680
681 for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
682 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
683 AWGE_TX_RING_COUNT, MCLBYTES, 0,
684 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
685 &ring->t_data[i].td_map);
686 if (error != 0) {
687 aprint_error_dev(sc->sc_dev,
688 "could not create TX DMA map #%d\n", i);
689 ring->t_data[i].td_map = NULL;
690 goto fail;
691 }
692 ring->t_desc[i].ddesc_next = htole32(
693 ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
694 *TX_NEXT(i));
695 }
696
697 return 0;
698
699 fail:
700 dwc_gmac_free_tx_ring(sc, ring);
701 return error;
702 }
703
704 static void
705 dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
706 {
707 /* 'end' is pointing one descriptor beyond the last we want to sync */
708 if (end > start) {
709 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
710 TX_DESC_OFFSET(start),
711 TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
712 ops);
713 return;
714 }
715 /* sync from 'start' to end of ring */
716 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
717 TX_DESC_OFFSET(start),
718 TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
719 ops);
720 if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
721 /* sync from start of ring to 'end' */
722 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
723 TX_DESC_OFFSET(0),
724 TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
725 ops);
726 }
727 }
728
729 static void
730 dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
731 struct dwc_gmac_tx_ring *ring)
732 {
733 int i;
734
735 mutex_enter(&ring->t_mtx);
736 for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
737 struct dwc_gmac_tx_data *data = &ring->t_data[i];
738
739 if (data->td_m != NULL) {
740 bus_dmamap_sync(sc->sc_dmat, data->td_active,
741 0, data->td_active->dm_mapsize,
742 BUS_DMASYNC_POSTWRITE);
743 bus_dmamap_unload(sc->sc_dmat, data->td_active);
744 m_freem(data->td_m);
745 data->td_m = NULL;
746 }
747 }
748
749 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
750 TX_DESC_OFFSET(0),
751 AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
752 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
753 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
754 sc->sc_txq.t_physaddr);
755
756 ring->t_queued = 0;
757 ring->t_cur = ring->t_next = 0;
758 mutex_exit(&ring->t_mtx);
759 }
760
761 static void
762 dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
763 struct dwc_gmac_tx_ring *ring)
764 {
765 int i;
766
767 /* unload the maps */
768 for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
769 struct dwc_gmac_tx_data *data = &ring->t_data[i];
770
771 if (data->td_m != NULL) {
772 bus_dmamap_sync(sc->sc_dmat, data->td_active,
773 0, data->td_map->dm_mapsize,
774 BUS_DMASYNC_POSTWRITE);
775 bus_dmamap_unload(sc->sc_dmat, data->td_active);
776 m_freem(data->td_m);
777 data->td_m = NULL;
778 }
779 }
780
781 /* and actually free them */
782 for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
783 struct dwc_gmac_tx_data *data = &ring->t_data[i];
784
785 bus_dmamap_destroy(sc->sc_dmat, data->td_map);
786 }
787 }
788
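/*
 * MII status change callback: reprogram AWIN_GMAC_MAC_CONF (MII/GMII
 * port selection, 100Mbit mode, duplex) and AWIN_GMAC_MAC_FLOWCTRL
 * (pause frames) to match the media negotiated by the PHY, and let the
 * optional platform hook sc_set_speed adjust clocking.
 */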
789 static void
790 dwc_gmac_miibus_statchg(struct ifnet *ifp)
791 {
792 struct dwc_gmac_softc * const sc = ifp->if_softc;
793 struct mii_data * const mii = &sc->sc_mii;
794 uint32_t conf, flow;
795
796 /*
797 * Set MII or GMII interface based on the speed
798 * negotiated by the PHY.
799 */
800 conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
801 conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
802 | AWIN_GMAC_MAC_CONF_FULLDPLX);
803 conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
804 | AWIN_GMAC_MAC_CONF_DISABLERXOWN
805 | AWIN_GMAC_MAC_CONF_DISABLEJABBER
806 | AWIN_GMAC_MAC_CONF_RXENABLE
807 | AWIN_GMAC_MAC_CONF_TXENABLE;
808 switch (IFM_SUBTYPE(mii->mii_media_active)) {
809 case IFM_10_T:
810 conf |= AWIN_GMAC_MAC_CONF_MIISEL;
811 break;
812 case IFM_100_TX:
813 conf |= AWIN_GMAC_MAC_CONF_FES100 |
814 AWIN_GMAC_MAC_CONF_MIISEL;
815 break;
816 case IFM_1000_T:
817 break;
818 }
819 if (sc->sc_set_speed)
820 sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));
821
822 flow = 0;
823 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
824 conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
825 flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
826 }
827 if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
828 flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
829 }
830 if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
831 flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
832 }
833 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
834 AWIN_GMAC_MAC_FLOWCTRL, flow);
835
836 #ifdef DWC_GMAC_DEBUG
837 aprint_normal_dev(sc->sc_dev,
838 "setting MAC conf register: %08x\n", conf);
839 #endif
840
841 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
842 AWIN_GMAC_MAC_CONF, conf);
843 }
844
845 static int
846 dwc_gmac_init(struct ifnet *ifp)
847 {
848 struct dwc_gmac_softc *sc = ifp->if_softc;
849
850 mutex_enter(sc->sc_lock);
851 int ret = dwc_gmac_init_locked(ifp);
852 mutex_exit(sc->sc_lock);
853
854 return ret;
855 }
856
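/*
 * Initialization proper, called with sc_lock held: program the DMA bus
 * mode, the address and multicast filters and the ring base addresses,
 * then start the RX and TX DMA engines.
 */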
857 static int
858 dwc_gmac_init_locked(struct ifnet *ifp)
859 {
860 struct dwc_gmac_softc *sc = ifp->if_softc;
861 uint32_t ffilt;
862
863 if (ifp->if_flags & IFF_RUNNING)
864 return 0;
865
866 dwc_gmac_stop_locked(ifp, 0);
867
868 /*
869 * Configure DMA burst/transfer mode and RX/TX priorities.
870 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
871 */
872 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
873 GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
874 __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
875 __SHIFTIN(2, GMAC_BUSMODE_PBL));
876
877 /*
878 * Set up address filter
879 */
880 ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
881 if (ifp->if_flags & IFF_PROMISC) {
882 ffilt |= AWIN_GMAC_MAC_FFILT_PR;
883 } else {
884 ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
885 }
886 if (ifp->if_flags & IFF_BROADCAST) {
887 ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
888 } else {
889 ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
890 }
891 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
892
893 /*
894 * Set up multicast filter
895 */
896 dwc_gmac_setmulti(sc);
897
	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
901 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
902 sc->sc_rxq.r_physaddr);
903 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
904 sc->sc_txq.t_physaddr);
905
906 /*
907 * Start RX/TX part
908 */
909 uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
910 if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
911 opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
912 }
913 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
914
915 sc->sc_stopping = false;
916
917 ifp->if_flags |= IFF_RUNNING;
918 sc->sc_txbusy = false;
919
920 return 0;
921 }
922
923 static void
924 dwc_gmac_start(struct ifnet *ifp)
925 {
926 struct dwc_gmac_softc *sc = ifp->if_softc;
927 #ifdef DWCGMAC_MPSAFE
928 KASSERT(if_is_mpsafe(ifp));
929 #endif
930
931 mutex_enter(sc->sc_lock);
932 if (!sc->sc_stopping) {
933 mutex_enter(&sc->sc_txq.t_mtx);
934 dwc_gmac_start_locked(ifp);
935 mutex_exit(&sc->sc_txq.t_mtx);
936 }
937 mutex_exit(sc->sc_lock);
938 }
939
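/*
 * Dequeue packets from the interface send queue and map them onto TX
 * descriptors until the queue is empty or the ring fills up, then sync
 * the descriptors and kick the TX poll-demand register.
 */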
940 static void
941 dwc_gmac_start_locked(struct ifnet *ifp)
942 {
943 struct dwc_gmac_softc *sc = ifp->if_softc;
944 int old = sc->sc_txq.t_queued;
945 int start = sc->sc_txq.t_cur;
946 struct mbuf *m0;
947
948 if ((ifp->if_flags & IFF_RUNNING) == 0)
949 return;
950 if (sc->sc_txbusy)
951 return;
952
953 for (;;) {
954 IFQ_POLL(&ifp->if_snd, m0);
955 if (m0 == NULL)
956 break;
957 if (dwc_gmac_queue(sc, m0) != 0) {
958 sc->sc_txbusy = true;
959 break;
960 }
961 IFQ_DEQUEUE(&ifp->if_snd, m0);
962 bpf_mtap(ifp, m0, BPF_D_OUT);
963 if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
964 sc->sc_txbusy = true;
965 break;
966 }
967 }
968
969 if (sc->sc_txq.t_queued != old) {
970 /* packets have been queued, kick it off */
971 dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
972 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
973
974 #ifdef DWC_GMAC_DEBUG
975 dwc_dump_status(sc);
976 #endif
977 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
978 AWIN_GMAC_DMA_TXPOLL, ~0U);
979 }
980 }
981
982 static void
983 dwc_gmac_stop(struct ifnet *ifp, int disable)
984 {
985 struct dwc_gmac_softc *sc = ifp->if_softc;
986
987 mutex_enter(sc->sc_lock);
988 dwc_gmac_stop_locked(ifp, disable);
989 mutex_exit(sc->sc_lock);
990 }
991
992 static void
993 dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
994 {
995 struct dwc_gmac_softc *sc = ifp->if_softc;
996
997 sc->sc_stopping = true;
998
999 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1000 AWIN_GMAC_DMA_OPMODE,
1001 bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1002 AWIN_GMAC_DMA_OPMODE)
1003 & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
1004 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1005 AWIN_GMAC_DMA_OPMODE,
1006 bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1007 AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
1008
1009 mii_down(&sc->sc_mii);
1010 dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
1011 dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
1012
1013 ifp->if_flags &= ~IFF_RUNNING;
1014 sc->sc_txbusy = false;
1015 }
1016
1017 /*
1018 * Add m0 to the TX ring
1019 */
1020 static int
1021 dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
1022 {
1023 struct dwc_gmac_dev_dmadesc *desc = NULL;
1024 struct dwc_gmac_tx_data *data = NULL;
1025 bus_dmamap_t map;
1026 int error, i, first;
1027
1028 #ifdef DWC_GMAC_DEBUG
1029 aprint_normal_dev(sc->sc_dev,
1030 "dwc_gmac_queue: adding mbuf chain %p\n", m0);
1031 #endif
1032
1033 first = sc->sc_txq.t_cur;
1034 map = sc->sc_txq.t_data[first].td_map;
1035
1036 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
1037 BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1038 if (error != 0) {
1039 aprint_error_dev(sc->sc_dev, "could not map mbuf "
1040 "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
1041 return error;
1042 }
1043
1044 if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
1045 bus_dmamap_unload(sc->sc_dmat, map);
1046 return ENOBUFS;
1047 }
1048
1049 for (i = 0; i < map->dm_nsegs; i++) {
1050 data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
1051 desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
1052
1053 desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
1054
1055 #ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
1057 "len %lu\n", sc->sc_txq.t_cur,
1058 (unsigned long)map->dm_segs[i].ds_addr,
1059 (unsigned long)map->dm_segs[i].ds_len);
1060 #endif
1061
1062 sc->sc_descm->tx_init_flags(desc);
1063 sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
1064
1065 if (i == 0)
1066 sc->sc_descm->tx_set_first_frag(desc);
1067
1068 /*
1069 * Defer passing ownership of the first descriptor
1070 * until we are done.
1071 */
1072 if (i != 0)
1073 sc->sc_descm->tx_set_owned_by_dev(desc);
1074
1075 sc->sc_txq.t_queued++;
1076 sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
1077 }
1078
1079 sc->sc_descm->tx_set_last_frag(desc);
1080
1081 data->td_m = m0;
1082 data->td_active = map;
1083
1084 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1085 BUS_DMASYNC_PREWRITE);
1086
1087 /* Pass first to device */
1088 sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
1092
1093 return 0;
1094 }
1095
1096 /*
1097 * If the interface is up and running, only modify the receive
1098 * filter when setting promiscuous or debug mode. Otherwise fall
1099 * through to ether_ioctl, which will reset the chip.
1100 */
1101 static int
1102 dwc_gmac_ifflags_cb(struct ethercom *ec)
1103 {
1104 struct ifnet *ifp = &ec->ec_if;
1105 struct dwc_gmac_softc *sc = ifp->if_softc;
1106 int ret = 0;
1107
1108 mutex_enter(sc->sc_lock);
1109 u_short change = ifp->if_flags ^ sc->sc_if_flags;
1110 sc->sc_if_flags = ifp->if_flags;
1111
1112 if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
1113 ret = ENETRESET;
1114 goto out;
1115 }
1116 if ((change & IFF_PROMISC) != 0) {
1117 dwc_gmac_setmulti(sc);
1118 }
1119 out:
1120 mutex_exit(sc->sc_lock);
1121
1122 return ret;
1123 }
1124
1125 static int
1126 dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1127 {
1128 struct dwc_gmac_softc *sc = ifp->if_softc;
1129 int error = 0;
1130
1131 int s = splnet();
1132 error = ether_ioctl(ifp, cmd, data);
1133
1134 #ifdef DWCGMAC_MPSAFE
1135 splx(s);
1136 #endif
1137
1138 if (error == ENETRESET) {
1139 error = 0;
1140 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
1141 ;
1142 else if (ifp->if_flags & IFF_RUNNING) {
1143 /*
1144 * Multicast list has changed; set the hardware filter
1145 * accordingly.
1146 */
1147 mutex_enter(sc->sc_lock);
1148 dwc_gmac_setmulti(sc);
1149 mutex_exit(sc->sc_lock);
1150 }
1151 }
1152
1153 /* Try to get things going again */
1154 if (ifp->if_flags & IFF_UP)
1155 dwc_gmac_start(ifp);
1156 sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1157
1158 #ifndef DWCGMAC_MPSAFE
1159 splx(s);
1160 #endif
1161
1162 return error;
1163 }
1164
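/*
 * TX completion: walk the ring from t_next, reclaim descriptors the
 * device has handed back, unload their DMA maps and free the
 * transmitted mbufs.
 */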
1165 static void
1166 dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
1167 {
1168 struct ifnet *ifp = &sc->sc_ec.ec_if;
1169 struct dwc_gmac_tx_data *data;
1170 struct dwc_gmac_dev_dmadesc *desc;
1171 int i, nsegs;
1172
1173 mutex_enter(&sc->sc_txq.t_mtx);
1174
1175 for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
1176 #ifdef DWC_GMAC_DEBUG
1177 aprint_normal_dev(sc->sc_dev,
1178 "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
1179 i, sc->sc_txq.t_queued);
1180 #endif
1181
		/*
		 * i + 1 does not need to be a valid descriptor;
		 * it is just a convenient way to express the sync
		 * range for a single tx descriptor (i).
		 */
1187 dwc_gmac_txdesc_sync(sc, i, i+1,
1188 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1189
1190 desc = &sc->sc_txq.t_desc[i];
1191 if (sc->sc_descm->tx_is_owned_by_dev(desc))
1192 break;
1193
1194 data = &sc->sc_txq.t_data[i];
1195 if (data->td_m == NULL)
1196 continue;
1197
1198 if_statinc(ifp, if_opackets);
1199 nsegs = data->td_active->dm_nsegs;
1200 bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
1201 data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1202 bus_dmamap_unload(sc->sc_dmat, data->td_active);
1203
1204 #ifdef DWC_GMAC_DEBUG
1205 aprint_normal_dev(sc->sc_dev,
1206 "dwc_gmac_tx_intr: done with packet at desc #%d, "
1207 "freeing mbuf %p\n", i, data->td_m);
1208 #endif
1209
1210 m_freem(data->td_m);
1211 data->td_m = NULL;
1212
1213 sc->sc_txq.t_queued -= nsegs;
1214 }
1215
1216 sc->sc_txq.t_next = i;
1217
1218 if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
1219 sc->sc_txbusy = false;
1220 }
1221 mutex_exit(&sc->sc_txq.t_mtx);
1222 }
1223
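/*
 * RX completion: for every descriptor the device has finished with, try
 * to replace its mbuf by a fresh one, pass the received packet up the
 * stack and return the descriptor to the hardware.
 */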
1224 static void
1225 dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
1226 {
1227 struct ifnet *ifp = &sc->sc_ec.ec_if;
1228 struct dwc_gmac_dev_dmadesc *desc;
1229 struct dwc_gmac_rx_data *data;
1230 bus_addr_t physaddr;
1231 struct mbuf *m, *mnew;
1232 int i, len, error;
1233
1234 mutex_enter(&sc->sc_rxq.r_mtx);
1235 for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
1236 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1237 RX_DESC_OFFSET(i), sizeof(*desc),
1238 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1239 desc = &sc->sc_rxq.r_desc[i];
1240 data = &sc->sc_rxq.r_data[i];
1241
1242 if (sc->sc_descm->rx_is_owned_by_dev(desc))
1243 break;
1244
1245 if (sc->sc_descm->rx_has_error(desc)) {
1246 #ifdef DWC_GMAC_DEBUG
1247 aprint_normal_dev(sc->sc_dev,
1248 "RX error: descriptor status %08x, skipping\n",
1249 le32toh(desc->ddesc_status0));
1250 #endif
1251 if_statinc(ifp, if_ierrors);
1252 goto skip;
1253 }
1254
1255 len = sc->sc_descm->rx_get_len(desc);
1256
1257 #ifdef DWC_GMAC_DEBUG
1258 aprint_normal_dev(sc->sc_dev,
1259 "rx int: device is done with descriptor #%d, len: %d\n",
1260 i, len);
1261 #endif
1262
		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
1268 MGETHDR(mnew, M_DONTWAIT, MT_DATA);
1269 if (mnew == NULL) {
1270 if_statinc(ifp, if_ierrors);
1271 goto skip;
1272 }
1273 MCLGET(mnew, M_DONTWAIT);
1274 if ((mnew->m_flags & M_EXT) == 0) {
1275 m_freem(mnew);
1276 if_statinc(ifp, if_ierrors);
1277 goto skip;
1278 }
1279 mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
1280 if (mnew->m_len > AWGE_MAX_PACKET) {
1281 mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
1282 }
1283
1284 /* unload old DMA map */
1285 bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1286 data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1287 bus_dmamap_unload(sc->sc_dmat, data->rd_map);
1288
1289 /* and reload with new mbuf */
1290 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1291 mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
1292 if (error != 0) {
1293 m_freem(mnew);
1294 /* try to reload old mbuf */
1295 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
1296 data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
1297 if (error != 0) {
1298 panic("%s: could not load old rx mbuf",
1299 device_xname(sc->sc_dev));
1300 }
1301 if_statinc(ifp, if_ierrors);
1302 goto skip;
1303 }
1304 physaddr = data->rd_map->dm_segs[0].ds_addr;
1305
1306 /*
1307 * New mbuf loaded, update RX ring and continue
1308 */
1309 m = data->rd_m;
1310 data->rd_m = mnew;
1311 desc->ddesc_data = htole32(physaddr);
1312
1313 /* finalize mbuf */
1314 m->m_pkthdr.len = m->m_len = len;
1315 m_set_rcvif(m, ifp);
1316 m->m_flags |= M_HASFCS;
1317
1318 if_percpuq_enqueue(sc->sc_ipq, m);
1319
1320 skip:
1321 bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
1322 data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1323
1324 sc->sc_descm->rx_init_flags(desc);
1325 sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
1326 sc->sc_descm->rx_set_owned_by_dev(desc);
1327
1328 bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
1329 RX_DESC_OFFSET(i), sizeof(*desc),
1330 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1331 }
1332
1333 /* update RX pointer */
1334 sc->sc_rxq.r_cur = i;
1335
1336 mutex_exit(&sc->sc_rxq.r_mtx);
1337 }
1338
1339 /*
1340 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
1341 */
1342 static uint32_t
1343 bitrev32(uint32_t x)
1344 {
1345 x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
1346 x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
1347 x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
1348 x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
1349
1350 return (x >> 16) | (x << 16);
1351 }
1352
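/*
 * Program the 64-bit multicast hash filter.  Each multicast address is
 * hashed with CRC32 (inverted and bit-reversed, see below); the top six
 * bits select one bit in the HTHIGH/HTLOW registers.  Address ranges
 * and IFF_PROMISC fall back to all-multicast/promiscuous filtering.
 */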
1353 static void
1354 dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1355 {
1356 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1357 struct ether_multi *enm;
1358 struct ether_multistep step;
1359 struct ethercom *ec = &sc->sc_ec;
1360 uint32_t hashes[2] = { 0, 0 };
1361 uint32_t ffilt, h;
1362 int mcnt;
1363
1364 KASSERT(mutex_owned(sc->sc_lock));
1365
1366 ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1367
1368 if (ifp->if_flags & IFF_PROMISC) {
1369 ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1370 goto special_filter;
1371 }
1372
1373 ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
1374
1375 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1376 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1377
1378 ETHER_LOCK(ec);
1379 ec->ec_flags &= ~ETHER_F_ALLMULTI;
1380 ETHER_FIRST_MULTI(step, ec, enm);
1381 mcnt = 0;
1382 while (enm != NULL) {
1383 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1384 ETHER_ADDR_LEN) != 0) {
1385 ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1386 ec->ec_flags |= ETHER_F_ALLMULTI;
1387 ETHER_UNLOCK(ec);
1388 goto special_filter;
1389 }
1390
1391 h = bitrev32(
1392 ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
1393 ) >> 26;
1394 hashes[h >> 5] |= (1 << (h & 0x1f));
1395
1396 mcnt++;
1397 ETHER_NEXT_MULTI(step, enm);
1398 }
1399 ETHER_UNLOCK(ec);
1400
1401 if (mcnt)
1402 ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1403 else
1404 ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1405
1406 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1407 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1408 hashes[0]);
1409 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1410 hashes[1]);
1411 sc->sc_if_flags = ifp->if_flags;
1412
1413 #ifdef DWC_GMAC_DEBUG
1414 dwc_gmac_dump_ffilt(sc, ffilt);
1415 #endif
1416 return;
1417
1418 special_filter:
1419 #ifdef DWC_GMAC_DEBUG
1420 dwc_gmac_dump_ffilt(sc, ffilt);
1421 #endif
1422 /* no MAC hashes, ALLMULTI or PROMISC */
1423 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1424 ffilt);
1425 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1426 0xffffffff);
1427 bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1428 0xffffffff);
1429 sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1430 }
1431
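/*
 * Interrupt handler, exported to the bus front-ends: dispatch MII link
 * events and RX/TX DMA completions, count error interrupts, feed the
 * random source and acknowledge the DMA status bits we handled.
 */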
1432 int
1433 dwc_gmac_intr(struct dwc_gmac_softc *sc)
1434 {
1435 uint32_t status, dma_status;
1436 int rv = 0;
1437
1438 if (sc->sc_stopping)
1439 return 0;
1440
1441 status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
1442 if (status & AWIN_GMAC_MII_IRQ) {
1443 (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1444 AWIN_GMAC_MII_STATUS);
1445 rv = 1;
1446 mii_pollstat(&sc->sc_mii);
1447 }
1448
1449 dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1450 AWIN_GMAC_DMA_STATUS);
1451
1452 if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
1453 rv = 1;
1454
1455 if (dma_status & GMAC_DMA_INT_TIE)
1456 dwc_gmac_tx_intr(sc);
1457
1458 if (dma_status & GMAC_DMA_INT_RIE)
1459 dwc_gmac_rx_intr(sc);
1460
1461 /*
1462 * Check error conditions
1463 */
1464 if (dma_status & GMAC_DMA_INT_ERRORS) {
1465 if_statinc(&sc->sc_ec.ec_if, if_oerrors);
1466 #ifdef DWC_GMAC_DEBUG
1467 dwc_dump_and_abort(sc, "interrupt error condition");
1468 #endif
1469 }
1470
1471 rnd_add_uint32(&sc->rnd_source, dma_status);
1472
1473 /* ack interrupt */
1474 if (dma_status)
1475 bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1476 AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1477
1478 /*
1479 * Get more packets
1480 */
1481 if (rv)
1482 if_schedule_deferred_start(&sc->sc_ec.ec_if);
1483
1484 return rv;
1485 }
1486
1487 static void
1488 dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1489 {
1490
1491 desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
1492 }
1493
1494 static int
1495 dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1496 {
1497
1498 return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
1499 }
1500
1501 static void
1502 dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1503 {
1504 uint32_t cntl = le32toh(desc->ddesc_cntl1);
1505
1506 desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
1507 __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
1508 }
1509
1510 static uint32_t
1511 dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
1512 {
1513
1514 return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
1515 }
1516
1517 static void
1518 dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1519 {
1520
1521 desc->ddesc_status0 = 0;
1522 desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1523 }
1524
1525 static void
1526 dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1527 {
1528 uint32_t cntl = le32toh(desc->ddesc_cntl1);
1529
1530 desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
1531 }
1532
1533 static void
1534 dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1535 {
1536 uint32_t cntl = le32toh(desc->ddesc_cntl1);
1537
1538 desc->ddesc_cntl1 = htole32(cntl |
1539 DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
1540 }
1541
1542 static void
1543 dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1544 {
1545
1546 desc->ddesc_status0 = 0;
1547 desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1548 }
1549
1550 static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

1552 return !!(le32toh(desc->ddesc_status0) &
1553 (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
1554 }
1555
1556 static void
1557 dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1558 {
1559 uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
1560
1561 desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
1562 __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
1563 }
1564
1565 static uint32_t
1566 dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
1567 {
1568
1569 return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
1570 }
1571
1572 static void
1573 dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1574 {
1575
1576 desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
1577 desc->ddesc_cntl1 = 0;
1578 }
1579
1580 static void
1581 dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1582 {
1583 uint32_t tdes0 = le32toh(desc->ddesc_status0);
1584
1585 desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
1586 }
1587
1588 static void
1589 dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1590 {
1591 uint32_t tdes0 = le32toh(desc->ddesc_status0);
1592
1593 desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
1594 }
1595
1596 static void
1597 dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1598 {
1599
1600 desc->ddesc_status0 = 0;
1601 desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
1602 }
1603
1604 static int
1605 dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
1606 {
1607
1608 return !!(le32toh(desc->ddesc_status0) &
1609 (DDESC_RDES0_ES | DDESC_RDES0_LE));
1610 }
1611
1612 #ifdef DWC_GMAC_DEBUG
1613 static void
1614 dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1615 {
1616 aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1617 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1618 aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1619 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1620 aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1621 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1622 aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1623 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1624 aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1625 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1626 aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1627 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1628 aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1629 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1630 aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1631 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1632 aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1633 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1634 aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1635 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1636 aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1637 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1638 aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1639 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1640 }
1641
1642 static void
1643 dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1644 {
1645 int i;
1646
1647 aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1648 sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1649 aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1650 for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1651 struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1652 aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1653 "data: %08x next: %08x\n",
1654 i, sc->sc_txq.t_physaddr +
1655 i*sizeof(struct dwc_gmac_dev_dmadesc),
1656 le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1657 le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1658 }
1659 }
1660
1661 static void
1662 dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1663 {
1664 int i;
1665
1666 aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1667 sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1668 aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1669 for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1670 struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1671 aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1672 "data: %08x next: %08x\n",
1673 i, sc->sc_rxq.r_physaddr +
1674 i*sizeof(struct dwc_gmac_dev_dmadesc),
1675 le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1676 le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1677 }
1678 }
1679
1680 static void
1681 dwc_dump_status(struct dwc_gmac_softc *sc)
1682 {
1683 uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1684 AWIN_GMAC_MAC_INTR);
1685 uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1686 AWIN_GMAC_DMA_STATUS);
1687 char buf[200];
1688
1689 /* print interrupt state */
1690 snprintb(buf, sizeof(buf), "\177\20"
1691 "b\x10""NI\0"
1692 "b\x0f""AI\0"
1693 "b\x0e""ER\0"
1694 "b\x0d""FB\0"
1695 "b\x0a""ET\0"
1696 "b\x09""RW\0"
1697 "b\x08""RS\0"
1698 "b\x07""RU\0"
1699 "b\x06""RI\0"
1700 "b\x05""UN\0"
1701 "b\x04""OV\0"
1702 "b\x03""TJ\0"
1703 "b\x02""TU\0"
1704 "b\x01""TS\0"
1705 "b\x00""TI\0"
1706 "\0", dma_status);
1707 aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1708 status, buf);
1709 }
1710
1711 static void
1712 dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1713 {
1714 dwc_dump_status(sc);
1715 dwc_gmac_dump_ffilt(sc,
1716 bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1717 dwc_gmac_dump_dma(sc);
1718 dwc_gmac_dump_tx_desc(sc);
1719 dwc_gmac_dump_rx_desc(sc);
1720
1721 panic("%s", msg);
1722 }
1723
static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1725 {
1726 char buf[200];
1727
1728 /* print filter setup */
1729 snprintb(buf, sizeof(buf), "\177\20"
1730 "b\x1f""RA\0"
1731 "b\x0a""HPF\0"
1732 "b\x09""SAF\0"
1733 "b\x08""SAIF\0"
1734 "b\x05""DBF\0"
1735 "b\x04""PM\0"
1736 "b\x03""DAIF\0"
1737 "b\x02""HMC\0"
1738 "b\x01""HUC\0"
1739 "b\x00""PR\0"
1740 "\0", ffilt);
1741 aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1742 }
1743 #endif
1744