/* $NetBSD: dwc_gmac.c,v 1.46 2018/06/16 00:15:00 jmcneill Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; only the marketing
 * product page could be found:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.46 2018/06/16 00:15:00 jmcneill Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

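/*
 * All RX and TX descriptors live in one contiguous DMA allocation:
 * the AWGE_RX_RING_COUNT RX descriptors come first, followed by the
 * TX descriptors.  The *_DESC_OFFSET macros compute a descriptor's
 * byte offset within that allocation, and the *_NEXT macros advance
 * a ring index with wrap-around (the ring sizes are powers of two,
 * so masking works).
 */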
#define TX_DESC_OFFSET(N)   ((AWGE_RX_RING_COUNT+(N)) \
                                *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)          (((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)   ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)          (((N)+1) & (AWGE_RX_RING_COUNT-1))

#define GMAC_DEF_DMA_INT_MASK   (GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
                                GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
                                GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS     (GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
                                GMAC_DMA_INT_FBE| \
                                GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
                                GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
                                GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
    (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
    AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE 1
#endif

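/*
 * Attach the core: establish the MAC address (from a device property
 * if set, otherwise from whatever the bootloader left in the address
 * filter, with a random fallback), reset the chip, allocate the
 * descriptor rings, hook up MII and the network interface, and
 * finally enable interrupts.
 */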
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
    uint8_t enaddr[ETHER_ADDR_LEN];
    uint32_t maclo, machi;
    struct mii_data * const mii = &sc->sc_mii;
    struct ifnet * const ifp = &sc->sc_ec.ec_if;
    prop_dictionary_t dict;
    int rv;

    mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
    sc->sc_mii_clk = mii_clk & 7;

    dict = device_properties(sc->sc_dev);
    prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
    if (ea != NULL) {
        /*
         * If the MAC address is overridden by a device property,
         * use that.
         */
        KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
        KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
        memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
    } else {
        /*
         * If we did not get an externally configured address,
         * try to read one from the current filter setup,
         * before resetting the chip.
         */
        maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_ADDR0LO);
        machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_ADDR0HI);

        if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
            /* fake MAC address */
            maclo = 0x00f2 | (cprng_strong32() << 16);
            machi = cprng_strong32();
        }

        enaddr[0] = maclo & 0x0ff;
        enaddr[1] = (maclo >> 8) & 0x0ff;
        enaddr[2] = (maclo >> 16) & 0x0ff;
        enaddr[3] = (maclo >> 24) & 0x0ff;
        enaddr[4] = machi & 0x0ff;
        enaddr[5] = (machi >> 8) & 0x0ff;
    }

    /*
     * Init chip and do initial setup
     */
    if (dwc_gmac_reset(sc) != 0)
        return; /* not much to cleanup, haven't attached yet */
    dwc_gmac_write_hwaddr(sc, enaddr);
    aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
        ether_sprintf(enaddr));

    /*
     * Allocate Tx and Rx rings
     */
    if (dwc_gmac_alloc_dma_rings(sc) != 0) {
        aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
        goto fail;
    }

    if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
        aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
        goto fail;
    }

    if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
        aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
        goto fail;
    }

    sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
    mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
    mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

    /*
     * Prepare interface data
     */
    ifp->if_softc = sc;
    strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
    ifp->if_extflags = IFEF_MPSAFE;
#endif
    ifp->if_ioctl = dwc_gmac_ioctl;
    ifp->if_start = dwc_gmac_start;
    ifp->if_init = dwc_gmac_init;
    ifp->if_stop = dwc_gmac_stop;
    IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
    IFQ_SET_READY(&ifp->if_snd);

    /*
     * Attach MII subdevices
     */
    sc->sc_ec.ec_mii = &sc->sc_mii;
    ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
    mii->mii_ifp = ifp;
    mii->mii_readreg = dwc_gmac_miibus_read_reg;
    mii->mii_writereg = dwc_gmac_miibus_write_reg;
    mii->mii_statchg = dwc_gmac_miibus_statchg;
    mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
        MIIF_DOPAUSE);

    if (LIST_EMPTY(&mii->mii_phys)) {
        aprint_error_dev(sc->sc_dev, "no PHY found!\n");
        ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
        ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
    } else {
        ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
    }

    /*
     * We can support 802.1Q VLAN-sized frames.
     */
    sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

    /*
     * Ready, attach interface
     */
    rv = if_initialize(ifp);
    if (rv != 0)
        goto fail_2;
    sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
    if_deferred_start_init(ifp, NULL);
    ether_ifattach(ifp, enaddr);
    ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
    if_register(ifp);

    /*
     * Enable interrupts
     */
    mutex_enter(sc->sc_lock);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
        AWIN_DEF_MAC_INTRMASK);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
        GMAC_DEF_DMA_INT_MASK);
    mutex_exit(sc->sc_lock);

    return;
fail_2:
    ifmedia_removeall(&mii->mii_media);
    mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
    mutex_destroy(&sc->sc_txq.t_mtx);
    mutex_destroy(&sc->sc_rxq.r_mtx);
    mutex_obj_free(sc->sc_lock);
fail:
    dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
    dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
    dwc_gmac_free_dma_rings(sc);
    mutex_destroy(&sc->sc_mdio_lock);
}

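/*
 * Soft-reset the core: set the reset bit in the DMA bus-mode register
 * and poll until the hardware clears it again (up to 3000 * 10us = 30ms).
 */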
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
    size_t cnt;
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
    for (cnt = 0; cnt < 3000; cnt++) {
        if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
            & GMAC_BUSMODE_RESET) == 0)
            return 0;
        delay(10);
    }

    aprint_error_dev(sc->sc_dev, "reset timed out\n");
    return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
    uint32_t lo, hi;

    lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
        | (enaddr[3] << 24);
    hi = enaddr[4] | (enaddr[5] << 8);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

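/*
 * MDIO access: encode the PHY and register numbers plus the MDC clock
 * divider into the MII address register, set the BUSY bit (and WRITE
 * for writes), then poll until the hardware clears BUSY.  sc_mdio_lock
 * serializes access to the shared MDIO registers.
 */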
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
    struct dwc_gmac_softc * const sc = device_private(self);
    uint16_t mii;
    size_t cnt;
    int rv = 0;

    mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
        | __SHIFTIN(reg,GMAC_MII_REG_MASK)
        | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
        | GMAC_MII_BUSY;

    mutex_enter(&sc->sc_mdio_lock);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

    for (cnt = 0; cnt < 1000; cnt++) {
        if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
            rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                AWIN_GMAC_MAC_MIIDATA);
            break;
        }
        delay(10);
    }

    mutex_exit(&sc->sc_mdio_lock);

    return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
    struct dwc_gmac_softc * const sc = device_private(self);
    uint16_t mii;
    size_t cnt;

    mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
        | __SHIFTIN(reg,GMAC_MII_REG_MASK)
        | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
        | GMAC_MII_BUSY | GMAC_MII_WRITE;

    mutex_enter(&sc->sc_mdio_lock);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

    for (cnt = 0; cnt < 1000; cnt++) {
        if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
            break;
        delay(10);
    }

    mutex_exit(&sc->sc_mdio_lock);
}

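/*
 * Set up the RX ring: each of the AWGE_RX_RING_COUNT slots gets an
 * mbuf cluster as receive buffer, the descriptors are chained via
 * ddesc_next, and ownership of every slot is handed to the device.
 */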
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
    struct dwc_gmac_rx_data *data;
    bus_addr_t physaddr;
    const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
    int error, i, next;

    ring->r_cur = ring->r_next = 0;
    memset(ring->r_desc, 0, descsize);

    /*
     * Pre-allocate Rx buffers and populate Rx ring.
     */
    for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
        struct dwc_gmac_dev_dmadesc *desc;

        data = &sc->sc_rxq.r_data[i];

        MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
        if (data->rd_m == NULL) {
            aprint_error_dev(sc->sc_dev,
                "could not allocate rx mbuf #%d\n", i);
            error = ENOMEM;
            goto fail;
        }
        error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
            MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
        if (error != 0) {
            aprint_error_dev(sc->sc_dev,
                "could not create DMA map\n");
            data->rd_map = NULL;
            goto fail;
        }
        MCLGET(data->rd_m, M_DONTWAIT);
        if (!(data->rd_m->m_flags & M_EXT)) {
            aprint_error_dev(sc->sc_dev,
                "could not allocate mbuf cluster #%d\n", i);
            error = ENOMEM;
            goto fail;
        }

        error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
            mtod(data->rd_m, void *), MCLBYTES, NULL,
            BUS_DMA_READ | BUS_DMA_NOWAIT);
        if (error != 0) {
            aprint_error_dev(sc->sc_dev,
                "could not load rx buf DMA map #%d\n", i);
            goto fail;
        }
        physaddr = data->rd_map->dm_segs[0].ds_addr;

        desc = &sc->sc_rxq.r_desc[i];
        desc->ddesc_data = htole32(physaddr);
        next = RX_NEXT(i);
        desc->ddesc_next = htole32(ring->r_physaddr
            + next * sizeof(*desc));
        desc->ddesc_cntl = htole32(
            __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
            DDESC_CNTL_RXCHAIN);
        desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
    }

    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
        AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
        BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
        ring->r_physaddr);

    return 0;

fail:
    dwc_gmac_free_rx_ring(sc, ring);
    return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
    struct dwc_gmac_dev_dmadesc *desc;
    int i;

    mutex_enter(&ring->r_mtx);
    for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
        desc = &sc->sc_rxq.r_desc[i];
        desc->ddesc_cntl = htole32(
            __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
            DDESC_CNTL_RXCHAIN);
        desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
    }

    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
        AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
        BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

    ring->r_cur = ring->r_next = 0;
    /* reset DMA address to start of ring */
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
        sc->sc_rxq.r_physaddr);
    mutex_exit(&ring->r_mtx);
}

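/*
 * Allocate one physically contiguous, page-aligned chunk of DMA
 * memory for all descriptors and map it; the first
 * AWGE_RX_RING_COUNT descriptors form the RX ring and the rest the
 * TX ring.
 */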
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
    const size_t descsize = AWGE_TOTAL_RING_COUNT *
        sizeof(struct dwc_gmac_dev_dmadesc);
    int error, nsegs;
    void *rings;

    error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
        BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
    if (error != 0) {
        aprint_error_dev(sc->sc_dev,
            "could not create desc DMA map\n");
        sc->sc_dma_ring_map = NULL;
        goto fail;
    }

    error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
        &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    if (error != 0) {
        aprint_error_dev(sc->sc_dev,
            "could not allocate DMA memory\n");
        goto fail;
    }

    error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
        descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    if (error != 0) {
        aprint_error_dev(sc->sc_dev,
            "could not map DMA memory\n");
        goto fail;
    }

    error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
        descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
    if (error != 0) {
        aprint_error_dev(sc->sc_dev,
            "could not load desc DMA map\n");
        goto fail;
    }

    /* give first AWGE_RX_RING_COUNT to the RX side */
    sc->sc_rxq.r_desc = rings;
    sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

    /* and next rings to the TX side */
    sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
    sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
        AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

    return 0;

fail:
    dwc_gmac_free_dma_rings(sc);
    return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
        sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
    bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
        AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
    bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
    struct dwc_gmac_rx_data *data;
    int i;

    if (ring->r_desc == NULL)
        return;

    for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
        data = &ring->r_data[i];

        if (data->rd_map != NULL) {
            bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                data->rd_map->dm_mapsize,
                BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(sc->sc_dmat, data->rd_map);
            bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
        }
        if (data->rd_m != NULL)
            m_freem(data->rd_m);
    }
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
    int i, error = 0;

    ring->t_queued = 0;
    ring->t_cur = ring->t_next = 0;

    memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
        TX_DESC_OFFSET(0),
        AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
        BUS_DMASYNC_POSTWRITE);

    for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
        error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
            AWGE_TX_RING_COUNT, MCLBYTES, 0,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
            &ring->t_data[i].td_map);
        if (error != 0) {
            aprint_error_dev(sc->sc_dev,
                "could not create TX DMA map #%d\n", i);
            ring->t_data[i].td_map = NULL;
            goto fail;
        }
        ring->t_desc[i].ddesc_next = htole32(
            ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
            *TX_NEXT(i));
    }

    return 0;

fail:
    dwc_gmac_free_tx_ring(sc, ring);
    return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
    /* 'end' points one descriptor beyond the last one we want to sync */
    if (end > start) {
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(start),
            TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
            ops);
        return;
    }
    /* sync from 'start' to end of ring */
    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
        TX_DESC_OFFSET(start),
        TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
        ops);
    /* sync from start of ring to 'end' */
    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
        TX_DESC_OFFSET(0),
        TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
        ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
    int i;

    mutex_enter(&ring->t_mtx);
    for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
        struct dwc_gmac_tx_data *data = &ring->t_data[i];

        if (data->td_m != NULL) {
            bus_dmamap_sync(sc->sc_dmat, data->td_active,
                0, data->td_active->dm_mapsize,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_dmat, data->td_active);
            m_freem(data->td_m);
            data->td_m = NULL;
        }
    }

    bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
        TX_DESC_OFFSET(0),
        AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
        BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
        sc->sc_txq.t_physaddr);

    ring->t_queued = 0;
    ring->t_cur = ring->t_next = 0;
    mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
    int i;

    /* unload the maps */
    for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
        struct dwc_gmac_tx_data *data = &ring->t_data[i];

        if (data->td_m != NULL) {
            bus_dmamap_sync(sc->sc_dmat, data->td_active,
                0, data->td_active->dm_mapsize,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_dmat, data->td_active);
            m_freem(data->td_m);
            data->td_m = NULL;
        }
    }

    /* and actually free them */
    for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
        struct dwc_gmac_tx_data *data = &ring->t_data[i];

        bus_dmamap_destroy(sc->sc_dmat, data->td_map);
    }
}

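/*
 * Media status change: reprogram the MAC configuration to match the
 * speed and duplex the PHY negotiated (MIISEL selects the 10/100 MII
 * interface, FES100 selects 100Mbit operation on it), and set up
 * TX/RX flow control from the negotiated pause options.
 */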
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
    struct dwc_gmac_softc * const sc = ifp->if_softc;
    struct mii_data * const mii = &sc->sc_mii;
    uint32_t conf, flow;

    /*
     * Set MII or GMII interface based on the speed
     * negotiated by the PHY.
     */
    conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
    conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
        |AWIN_GMAC_MAC_CONF_FULLDPLX);
    conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
        | AWIN_GMAC_MAC_CONF_DISABLERXOWN
        | AWIN_GMAC_MAC_CONF_DISABLEJABBER
        | AWIN_GMAC_MAC_CONF_ACS
        | AWIN_GMAC_MAC_CONF_RXENABLE
        | AWIN_GMAC_MAC_CONF_TXENABLE;
    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_10_T:
        conf |= AWIN_GMAC_MAC_CONF_MIISEL;
        break;
    case IFM_100_TX:
        conf |= AWIN_GMAC_MAC_CONF_FES100 |
            AWIN_GMAC_MAC_CONF_MIISEL;
        break;
    case IFM_1000_T:
        break;
    }
    if (sc->sc_set_speed)
        sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

    flow = 0;
    if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
        conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
        flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
    }
    if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
        flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
    }
    if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
        flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
    }
    bus_space_write_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
    aprint_normal_dev(sc->sc_dev,
        "setting MAC conf register: %08x\n", conf);
#endif

    bus_space_write_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;

    mutex_enter(sc->sc_lock);
    int ret = dwc_gmac_init_locked(ifp);
    mutex_exit(sc->sc_lock);

    return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;
    uint32_t ffilt;

    if (ifp->if_flags & IFF_RUNNING)
        return 0;

    dwc_gmac_stop_locked(ifp, 0);

    /*
     * Configure DMA burst/transfer mode and RX/TX priorities.
     * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
     */
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
        GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
        __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
        __SHIFTIN(2, GMAC_BUSMODE_PBL));

    /*
     * Set up address filter
     */
    ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
    if (ifp->if_flags & IFF_PROMISC) {
        ffilt |= AWIN_GMAC_MAC_FFILT_PR;
    } else {
        ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
    }
    if (ifp->if_flags & IFF_BROADCAST) {
        ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
    } else {
        ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
    }
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

    /*
     * Set up multicast filter
     */
    dwc_gmac_setmulti(sc);

    /*
     * Set up DMA pointers for the RX and TX rings
     */
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
        sc->sc_rxq.r_physaddr);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
        sc->sc_txq.t_physaddr);

    /*
     * Start RX/TX part
     */
    uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
    if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
        opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
    }
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

    sc->sc_stopping = false;

    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;

    return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
    KASSERT(if_is_mpsafe(ifp));
#endif

    mutex_enter(sc->sc_lock);
    if (!sc->sc_stopping) {
        mutex_enter(&sc->sc_txq.t_mtx);
        dwc_gmac_start_locked(ifp);
        mutex_exit(&sc->sc_txq.t_mtx);
    }
    mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;
    int old = sc->sc_txq.t_queued;
    int start = sc->sc_txq.t_cur;
    struct mbuf *m0;

    if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
        return;

    for (;;) {
        IFQ_POLL(&ifp->if_snd, m0);
        if (m0 == NULL)
            break;
        if (dwc_gmac_queue(sc, m0) != 0) {
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }
        IFQ_DEQUEUE(&ifp->if_snd, m0);
        bpf_mtap(ifp, m0);
        if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }
    }

    if (sc->sc_txq.t_queued != old) {
        /* packets have been queued, kick it off */
        dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
        dwc_dump_status(sc);
#endif
    }
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;

    mutex_enter(sc->sc_lock);
    dwc_gmac_stop_locked(ifp, disable);
    mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;

    sc->sc_stopping = true;

    bus_space_write_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_DMA_OPMODE,
        bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE)
        & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
    bus_space_write_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_DMA_OPMODE,
        bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

    mii_down(&sc->sc_mii);
    dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
    dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
    struct dwc_gmac_dev_dmadesc *desc = NULL;
    struct dwc_gmac_tx_data *data = NULL;
    bus_dmamap_t map;
    uint32_t flags, len, status;
    int error, i, first;

#ifdef DWC_GMAC_DEBUG
    aprint_normal_dev(sc->sc_dev,
        "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

    first = sc->sc_txq.t_cur;
    map = sc->sc_txq.t_data[first].td_map;

    error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
        BUS_DMA_WRITE|BUS_DMA_NOWAIT);
    if (error != 0) {
        aprint_error_dev(sc->sc_dev, "could not map mbuf "
            "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
        return error;
    }

    if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
        bus_dmamap_unload(sc->sc_dmat, map);
        return ENOBUFS;
    }

    flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
    status = 0;
    for (i = 0; i < map->dm_nsegs; i++) {
        data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
        desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

        desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
        len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
            "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
            (unsigned long)map->dm_segs[i].ds_addr,
            (unsigned long)map->dm_segs[i].ds_len,
            flags, len);
#endif

        desc->ddesc_cntl = htole32(len|flags);
        flags &= ~DDESC_CNTL_TXFIRST;

        /*
         * Defer passing ownership of the first descriptor
         * until we are done.
         */
        desc->ddesc_status = htole32(status);
        status |= DDESC_STATUS_OWNEDBYDEV;

        sc->sc_txq.t_queued++;
        sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
    }

    desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

    data->td_m = m0;
    data->td_active = map;

    bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /* Pass first to device */
    sc->sc_txq.t_desc[first].ddesc_status =
        htole32(DDESC_STATUS_OWNEDBYDEV);

    return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
    struct ifnet *ifp = &ec->ec_if;
    struct dwc_gmac_softc *sc = ifp->if_softc;
    int ret = 0;

    mutex_enter(sc->sc_lock);
    int change = ifp->if_flags ^ sc->sc_if_flags;
    sc->sc_if_flags = ifp->if_flags;

    if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
        ret = ENETRESET;
        goto out;
    }
    if ((change & IFF_PROMISC) != 0) {
        dwc_gmac_setmulti(sc);
    }
out:
    mutex_exit(sc->sc_lock);

    return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
    struct dwc_gmac_softc *sc = ifp->if_softc;
    int error = 0;

    int s = splnet();
    error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
    splx(s);
#endif

    if (error == ENETRESET) {
        error = 0;
        if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) {
            if (ifp->if_flags & IFF_RUNNING) {
                /*
                 * Multicast list has changed; set the
                 * hardware filter accordingly.
                 */
                mutex_enter(sc->sc_lock);
                dwc_gmac_setmulti(sc);
                mutex_exit(sc->sc_lock);
            }
        }
    }

    /* Try to get things going again */
    if (ifp->if_flags & IFF_UP)
        dwc_gmac_start(ifp);
    sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
    splx(s);
#endif

    return error;
}

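/*
 * Reclaim completed TX descriptors: walk the ring from t_next while
 * packets are outstanding, stop at the first descriptor still owned
 * by the device, and unload the maps and free the mbufs of completed
 * packets.
 */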
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ec.ec_if;
    struct dwc_gmac_tx_data *data;
    struct dwc_gmac_dev_dmadesc *desc;
    uint32_t status;
    int i, nsegs;

    mutex_enter(&sc->sc_txq.t_mtx);

    for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
            i, sc->sc_txq.t_queued);
#endif

        /*
         * i+1 does not need to be a valid descriptor;
         * passing it as 'end' here merely syncs the
         * single tx descriptor (i).
         */
        dwc_gmac_txdesc_sync(sc, i, i+1,
            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

        desc = &sc->sc_txq.t_desc[i];
        status = le32toh(desc->ddesc_status);
        if (status & DDESC_STATUS_OWNEDBYDEV)
            break;

        data = &sc->sc_txq.t_data[i];
        if (data->td_m == NULL)
            continue;

        ifp->if_opackets++;
        nsegs = data->td_active->dm_nsegs;
        bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
            data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "dwc_gmac_tx_intr: done with packet at desc #%d, "
            "freeing mbuf %p\n", i, data->td_m);
#endif

        m_freem(data->td_m);
        data->td_m = NULL;

        sc->sc_txq.t_queued -= nsegs;
    }

    sc->sc_txq.t_next = i;

    if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
        ifp->if_flags &= ~IFF_OACTIVE;
    }
    mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
    struct ifnet *ifp = &sc->sc_ec.ec_if;
    struct dwc_gmac_dev_dmadesc *desc;
    struct dwc_gmac_rx_data *data;
    bus_addr_t physaddr;
    uint32_t status;
    struct mbuf *m, *mnew;
    int i, len, error;

    mutex_enter(&sc->sc_rxq.r_mtx);
    for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            RX_DESC_OFFSET(i), sizeof(*desc),
            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
        desc = &sc->sc_rxq.r_desc[i];
        data = &sc->sc_rxq.r_data[i];

        status = le32toh(desc->ddesc_status);
        if (status & DDESC_STATUS_OWNEDBYDEV)
            break;

        if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
            aprint_normal_dev(sc->sc_dev,
                "RX error: descriptor status %08x, skipping\n",
                status);
#endif
            ifp->if_ierrors++;
            goto skip;
        }

        len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "rx int: device is done with descriptor #%d, len: %d\n",
            i, len);
#endif

        /*
         * Try to get a new mbuf before passing this one up;
         * if that fails, drop the packet and reuse the
         * existing one.
         */
        MGETHDR(mnew, M_DONTWAIT, MT_DATA);
        if (mnew == NULL) {
            ifp->if_ierrors++;
            goto skip;
        }
        MCLGET(mnew, M_DONTWAIT);
        if ((mnew->m_flags & M_EXT) == 0) {
            m_freem(mnew);
            ifp->if_ierrors++;
            goto skip;
        }

        /* unload old DMA map */
        bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
            data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_dmat, data->rd_map);

        /* and reload with new mbuf */
        error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
            mtod(mnew, void*), MCLBYTES, NULL,
            BUS_DMA_READ | BUS_DMA_NOWAIT);
        if (error != 0) {
            m_freem(mnew);
            /* try to reload old mbuf */
            error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
                mtod(data->rd_m, void*), MCLBYTES, NULL,
                BUS_DMA_READ | BUS_DMA_NOWAIT);
            if (error != 0) {
                panic("%s: could not load old rx mbuf",
                    device_xname(sc->sc_dev));
            }
            ifp->if_ierrors++;
            goto skip;
        }
        physaddr = data->rd_map->dm_segs[0].ds_addr;

        /*
         * New mbuf loaded, update RX ring and continue
         */
        m = data->rd_m;
        data->rd_m = mnew;
        desc->ddesc_data = htole32(physaddr);

        /* finalize mbuf */
        m->m_pkthdr.len = m->m_len = len;
        m_set_rcvif(m, ifp);
        m->m_flags |= M_HASFCS;

        if_percpuq_enqueue(sc->sc_ipq, m);

skip:
        bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
            data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
        desc->ddesc_cntl = htole32(
            __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
            DDESC_CNTL_RXCHAIN);
        desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            RX_DESC_OFFSET(i), sizeof(*desc),
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
    }

    /* update RX pointer */
    sc->sc_rxq.r_cur = i;

    mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
    x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
    x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
    x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
    x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

    return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
    struct ifnet * const ifp = &sc->sc_ec.ec_if;
    struct ether_multi *enm;
    struct ether_multistep step;
    uint32_t hashes[2] = { 0, 0 };
    uint32_t ffilt, h;
    int mcnt;

    KASSERT(mutex_owned(sc->sc_lock));

    ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

    if (ifp->if_flags & IFF_PROMISC) {
        ffilt |= AWIN_GMAC_MAC_FFILT_PR;
        goto special_filter;
    }

    ifp->if_flags &= ~IFF_ALLMULTI;
    ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

    ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
    mcnt = 0;
    while (enm != NULL) {
        if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
            ETHER_ADDR_LEN) != 0) {
            ffilt |= AWIN_GMAC_MAC_FFILT_PM;
            ifp->if_flags |= IFF_ALLMULTI;
            goto special_filter;
        }

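        /*
         * The hardware hashes each multicast address to one of
         * 64 bins: the upper 6 bits of the bit-reversed, inverted
         * little-endian CRC-32 of the address select a bit in the
         * HTHIGH/HTLOW table pair.
         */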
        h = bitrev32(
            ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
            ) >> 26;
        hashes[h >> 5] |= (1 << (h & 0x1f));

        mcnt++;
        ETHER_NEXT_MULTI(step, enm);
    }

    if (mcnt)
        ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
    else
        ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
        hashes[0]);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
        hashes[1]);
    sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
    dwc_gmac_dump_ffilt(sc, ffilt);
#endif
    return;

special_filter:
#ifdef DWC_GMAC_DEBUG
    dwc_gmac_dump_ffilt(sc, ffilt);
#endif
    /* no MAC hashes, ALLMULTI or PROMISC */
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
        ffilt);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
        0xffffffff);
    bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
        0xffffffff);
    sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

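/*
 * Interrupt handler: a MII/link-change interrupt is cleared by
 * reading the MII status register and followed by a PHY state poll;
 * DMA interrupts dispatch to the TX/RX handlers and are acknowledged
 * by writing the handled bits back to the DMA status register.
 */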
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
    uint32_t status, dma_status;
    int rv = 0;

    if (sc->sc_stopping)
        return 0;

    status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
    if (status & AWIN_GMAC_MII_IRQ) {
        (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MII_STATUS);
        rv = 1;
        mii_pollstat(&sc->sc_mii);
    }

    dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_DMA_STATUS);

    if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
        rv = 1;

    if (dma_status & GMAC_DMA_INT_TIE)
        dwc_gmac_tx_intr(sc);

    if (dma_status & GMAC_DMA_INT_RIE)
        dwc_gmac_rx_intr(sc);

    /*
     * Check error conditions
     */
    if (dma_status & GMAC_DMA_INT_ERRORS) {
        sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
        dwc_dump_and_abort(sc, "interrupt error condition");
#endif
    }

    /* ack interrupt */
    if (dma_status)
        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

    /*
     * Get more packets
     */
    if (rv)
        if_schedule_deferred_start(&sc->sc_ec.ec_if);

    return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
    aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
    aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
    aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
    aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
    aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
    aprint_normal_dev(sc->sc_dev, "status: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
    aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
    aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
    aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
    aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
    aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
    aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
    int i;

    aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
        sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
    aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
    for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
        struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
        aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
            "data: %08x next: %08x\n",
            i, sc->sc_txq.t_physaddr +
                i*sizeof(struct dwc_gmac_dev_dmadesc),
            le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
            le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
    }
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
    int i;

    aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
        sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
    aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
    for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
        struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
        aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
            "data: %08x next: %08x\n",
            i, sc->sc_rxq.r_physaddr +
                i*sizeof(struct dwc_gmac_dev_dmadesc),
            le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
            le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
    }
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
    uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_MAC_INTR);
    uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
        AWIN_GMAC_DMA_STATUS);
    char buf[200];

    /* print interrupt state */
    snprintb(buf, sizeof(buf), "\177\20"
        "b\x10""NI\0"
        "b\x0f""AI\0"
        "b\x0e""ER\0"
        "b\x0d""FB\0"
        "b\x0a""ET\0"
        "b\x09""RW\0"
        "b\x08""RS\0"
        "b\x07""RU\0"
        "b\x06""RI\0"
        "b\x05""UN\0"
        "b\x04""OV\0"
        "b\x03""TJ\0"
        "b\x02""TU\0"
        "b\x01""TS\0"
        "b\x00""TI\0"
        "\0", dma_status);
    aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
        status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
    dwc_dump_status(sc);
    dwc_gmac_dump_ffilt(sc,
        bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
    dwc_gmac_dump_dma(sc);
    dwc_gmac_dump_tx_desc(sc);
    dwc_gmac_dump_rx_desc(sc);

    panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
    char buf[200];

    /* print filter setup */
    snprintb(buf, sizeof(buf), "\177\20"
        "b\x1f""RA\0"
        "b\x0a""HPF\0"
        "b\x09""SAF\0"
        "b\x08""SAIF\0"
        "b\x05""DBF\0"
        "b\x04""PM\0"
        "b\x03""DAIF\0"
        "b\x02""HMC\0"
        "b\x01""HUC\0"
        "b\x00""PR\0"
        "\0", ffilt);
    aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif