/* $NetBSD: dwc_gmac.c,v 1.40.6.5 2022/08/12 15:13:45 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.40.6.5 2022/08/12 15:13:45 martin Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

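/*
 * RX descriptors occupy the first AWGE_RX_RING_COUNT slots of the shared
 * descriptor memory, TX descriptors the following AWGE_TX_RING_COUNT
 * slots.  The *_NEXT() macros wrap by masking, so both ring sizes must
 * be powers of two.
 */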
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))


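/*
 * GMAC_DEF_DMA_INT_MASK is the set of DMA interrupts we enable and
 * service, GMAC_DMA_INT_ERRORS collects the conditions we count as
 * errors, and AWIN_DEF_MAC_INTRMASK is the MAC level interrupt mask
 * programmed at attach time.
 */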
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				    GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				    GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				    GMAC_DMA_INT_FBE| \
				    GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				    GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				    GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fabricate a locally administered unicast MAC */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface.
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
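
	/*
	 * Set the software-reset bit and poll (3000 * 10us = up to 30ms)
	 * for the core to clear it again.
	 */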
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

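/*
 * MDIO read handshake: write the PHY/register/clock selection with the
 * BUSY bit set to AWIN_GMAC_MAC_MIIADDR, poll until BUSY clears, then
 * fetch the result from AWIN_GMAC_MAC_MIIDATA.
 */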
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

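/*
 * MDIO write: same handshake as the read above, but the data register
 * is preloaded and GMAC_MII_WRITE is set in the address register.
 */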
static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

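	/*
	 * Flush the initialized descriptors to memory and point the
	 * DMA engine at the start of the ring.
	 */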
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

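	/*
	 * Create a DMA map per slot and link each descriptor to the
	 * next one, forming a circular chain.
	 */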
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

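/*
 * MII statchg callback: reprogram MAC speed, duplex and flow-control
 * settings to match what the PHY negotiated.
 */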
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start the RX and TX parts (store-and-forward mode)
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring: map the mbuf chain, fill one chained descriptor
 * per DMA segment, and only pass the first descriptor to the device at
 * the very end, so the engine never sees a partially built chain.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

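	/*
	 * In the MPSAFE case the splnet()/splx() pair only brackets
	 * ether_ioctl(); everything else is covered by the softc locks.
	 */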
	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor;
		 * it is merely shorthand to sync the single
		 * TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse the order of bits by swapping successively larger groups
 * (bits, pairs, nibbles, bytes, halfwords); e.g. bitrev32(0x00000001)
 * == 0x80000000.  See http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

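		/*
		 * The hash bin is the upper 6 bits of the bit-reversed
		 * CRC32 of the address: bit (h & 0x1f) of hash table
		 * word (h >> 5), i.e. of HTLOW or HTHIGH.
		 */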
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif