/* $NetBSD: dwc_gmac.c,v 1.39 2017/01/23 08:36:23 skrll Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product page can be found here:
 *
 *   http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.39 2017/01/23 08:36:23 skrll Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
	uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

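/*
 * Both rings live in a single DMA allocation: AWGE_RX_RING_COUNT RX
 * descriptors first, followed by the TX descriptors (see
 * dwc_gmac_alloc_dma_rings() below), which is why TX_DESC_OFFSET()
 * skips the RX part.  The *_NEXT() macros assume the ring counts are
 * powers of two, so an index wraps with a simple mask instead of a
 * modulo operation.
 */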
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

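/*
 * Default DMA interrupts: TX/RX completion (TIE/RIE), the normal and
 * abnormal summary bits (NIE/AIE), fatal bus errors (FBE) and TX
 * underflow (UNE); the conditions below are treated as errors.
 */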
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_START_MPSAFE;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach the interface.
	 */
	if_initialize(ifp);
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
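	/*
	 * The core clears GMAC_BUSMODE_RESET once the reset has
	 * completed; poll for that for up to ~30 ms (3000 * 10 us).
	 */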
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

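	/*
	 * The MDIO transaction is complete once the core clears
	 * GMAC_MII_BUSY; the result is then available in MIIDATA.
	 */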
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

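	/*
	 * Set up flow control: on full duplex insert a pause time of
	 * 0x200 into generated pause frames, and honour the TX/RX
	 * pause abilities negotiated by the PHY.
	 */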
	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

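		/*
		 * Writing any value to the TX poll register makes the
		 * DMA engine re-fetch the descriptor list.
		 */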
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

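	/*
	 * Not enough free descriptors for all segments: leave the
	 * packet on the send queue (the caller dequeues only after a
	 * successful dwc_gmac_queue()) and let it set IFF_OACTIVE.
	 */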
	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Pass the first descriptor to the device only now, so the
	 * engine never sees a partially built chain.
	 */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if ((cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 need not be a valid descriptor index; passing
		 * i and i+1 simply syncs the single TX descriptor i.
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
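		/*
		 * Only the descriptor of the last segment of a packet
		 * has td_m set; the whole chain is reclaimed when that
		 * descriptor completes.
		 */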
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

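		/*
		 * Common exit: re-arm the descriptor with either the
		 * old or the new buffer and hand it back to the device.
		 */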
skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

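		/*
		 * The 64-bit hash table is indexed with the top 6 bits
		 * of the bit-reversed CRC32 of the address: bit 5
		 * selects the HTLOW/HTHIGH word, the low 5 bits the
		 * bit within that word.
		 */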
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		sc->sc_ec.ec_if.if_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif