/* $NetBSD: dwc_gmac.c,v 1.66 2019/10/15 16:30:49 tnn Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.66 2019/10/15 16:30:49 tnn Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};
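
/*
 * One of the two method tables above is selected at attach time: cores
 * whose DMA hardware feature register advertises
 * GMAC_DMA_FEAT_ENHANCED_DESC use the enhanced descriptor layout, all
 * others the standard one.
 */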


#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
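
/*
 * The RX and TX descriptors share one DMA allocation: AWGE_RX_RING_COUNT
 * RX descriptors first, then AWGE_TX_RING_COUNT TX descriptors, which is
 * why TX_DESC_OFFSET() skips past the RX ring.  The *_NEXT() macros
 * assume both ring sizes are powers of two.
 */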



#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE |	\
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE |	\
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)
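
/*
 * GMAC_DMA_INT_NIE/AIE are the normal/abnormal interrupt summary
 * enables; the individual TIE/RIE/FBE/UNE bits select which events
 * raise those summaries.
 */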

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE |	\
				GMAC_DMA_INT_FBE |			\
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE |	\
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE |	\
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
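	/* poll up to ~30 ms (3000 * 10 us) for the reset bit to clear */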
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
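		/*
		 * Trim the buffer head so the DMA address starts on a
		 * 64 byte boundary (assumed to match the DMA burst and
		 * cache line size).
		 */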
		m_adj(data->rd_m,
		    roundup((uintptr_t)data->rd_m->m_data, 0x40) -
		    (uintptr_t)data->rd_m->m_data);
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 need not be a valid descriptor index; it is simply
		 * the way to ask dwc_gmac_txdesc_sync() to sync the
		 * single TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
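		/* as in dwc_gmac_alloc_rx_ring(), keep the RX buffer
		 * 64 byte aligned for DMA */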
		m_adj(mnew, roundup((uintptr_t)mnew->m_data, 0x40) -
		    (uintptr_t)mnew->m_data);
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

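		/*
		 * The hash filter uses the upper 6 bits of the
		 * bit-reversed CRC32 of the address: bit 5 selects the
		 * HTHIGH/HTLOW register, the low 5 bits the bit in it.
		 */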
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt; the DMA status bits are write-one-to-clear */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
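	/*
	 * In the standard format the RX chained-address bit shares its
	 * position with the TX one, hence DDESC_CNTL_TXCHAIN here.
	 */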
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		    i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		    i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif