/* $NetBSD: dwc_gmac.c,v 1.64 2019/07/21 08:24:32 mrg Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */
#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.64 2019/07/21 08:24:32 mrg Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

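/*
 * Accessor methods for the two descriptor formats: older cores keep
 * frame lengths and flags in ddesc_cntl1 ("standard"), while cores
 * with the enhanced descriptor feature keep the TX flags in
 * ddesc_status0 instead (see the dwc_gmac_desc_{std,enh}_* functions
 * below).
 */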
static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};
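/*
 * The RX descriptors occupy the first AWGE_RX_RING_COUNT slots of the
 * shared descriptor ring; the TX descriptors follow immediately after,
 * which is what TX_DESC_OFFSET() accounts for.  Both ring counts must
 * be powers of two for the *_NEXT() wrap-around masks to work.
 */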
#define	TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define	TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define	RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define	RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
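/*
 * DMA interrupts enabled by default: TX/RX completion (TIE/RIE), the
 * normal/abnormal summary bits (NIE/AIE), fatal bus error (FBE) and
 * TX underflow (UNE).
 */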
#define	GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define	GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define	AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

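		/*
		 * The ADDR0 registers hold the station address in
		 * little-endian byte order: octet 0 lives in the low
		 * byte of ADDR0LO.
		 */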
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
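	/*
	 * The DMA HW feature register only exists on newer cores;
	 * the check against 0x35 corresponds to core version 3.5.
	 */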
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
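	/*
	 * The reset bit self-clears once the core has finished its soft
	 * reset; poll for up to ~30ms (3000 * 10us) before giving up.
	 */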
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;
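
	/*
	 * Writing the address register with GMAC_MII_BUSY set starts the
	 * transfer; the core clears the bit again once the data register
	 * is valid (polled below for up to ~10ms).
	 */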
	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD |
		    GMAC_DMA_OP_TXSTOREFORWARD;
	}
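	/*
	 * Without store-and-forward the DMA engine runs in threshold
	 * mode; attachments whose core cannot use store-and-forward set
	 * DWC_GMAC_FORCE_THRESH_DMA_MODE.
	 */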
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
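
	/*
	 * The DMA map of the first ring slot is used to load the whole
	 * chain; td_m and td_active are stored at the slot of the last
	 * segment below, which is where the TX completion handler will
	 * look for them.
	 */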
	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor; it is only
		 * used here to make dwc_gmac_txdesc_sync() sync the single
		 * tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
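		/*
		 * Slots of intermediate segments carry no mbuf; only the
		 * last segment's slot has td_m and td_active set.
		 */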
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;
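		/*
		 * The length from the descriptor includes the 4-byte FCS,
		 * which M_HASFCS tells the stack to strip.
		 */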

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ec;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_LOCK(ec);
	ec->ec_flags &= ~ETHER_F_ALLMULTI;
	ETHER_FIRST_MULTI(step, ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ec->ec_flags |= ETHER_F_ALLMULTI;
			ETHER_UNLOCK(ec);
			goto special_filter;
		}

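		/*
		 * The hash index is the top 6 bits of the bit-reversed
		 * CRC32 of the multicast address: bit 5 selects the
		 * HTHIGH/HTLOW register, bits 0-4 the bit within it.
		 */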
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = ifp->if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	rnd_add_uint32(&sc->rnd_source, dma_status);

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

static void
dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
}

static int
dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
}

static void
dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
}

static void
dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
}

static void
dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
}

static void
dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t cntl = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32(cntl |
	    DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
}

static void
dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
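	/*
	 * Note: DDESC_CNTL_TXCHAIN doubles as the RX chain bit here; in
	 * the standard descriptor layout the chained-address flag sits
	 * in the same bit position for RX and TX descriptors.
	 */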
}

static int
dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
}

static void
dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
{
	uint32_t tdes1 = le32toh(desc->ddesc_cntl1);

	desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
	    __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
}

static uint32_t
dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
{

	return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
}

static void
dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
	desc->ddesc_cntl1 = 0;
}

static void
dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
}

static void
dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
{
	uint32_t tdes0 = le32toh(desc->ddesc_status0);

	desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
}

static void
dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
{

	desc->ddesc_status0 = 0;
	desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
}

static int
dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
{

	return !!(le32toh(desc->ddesc_status0) &
	    (DDESC_RDES0_ES | DDESC_RDES0_LE));
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif