/* $NetBSD: dwc_gmac.c,v 1.24.2.5 2015/01/07 21:12:04 msaitoh Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product pages can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.24.2.5 2015/01/07 21:12:04 msaitoh Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)       ((AWGE_RX_RING_COUNT+(N)) \
                                    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)              (((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)       ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)              (((N)+1) & (AWGE_RX_RING_COUNT-1))
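
/*
 * Note that the ring-index macros above rely on AWGE_TX_RING_COUNT and
 * AWGE_RX_RING_COUNT being powers of two: masking with (COUNT-1) is what
 * makes the increment wrap, e.g. with a count of 128, TX_NEXT(127)
 * == (128 & 127) == 0.
 */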

#define GMAC_DEF_DMA_INT_MASK   (GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
                                GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
                                GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS     (GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
                                GMAC_DMA_INT_FBE| \
                                GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
                                GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
                                GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
        (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
        AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)
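
/*
 * GMAC_DEF_DMA_INT_MASK is the set of DMA interrupts enabled at attach
 * time: TX/RX completion, the normal/abnormal summary bits, and the
 * fatal-bus-error and underflow sources.  GMAC_DMA_INT_ERRORS collects
 * the abnormal conditions that dwc_gmac_intr() counts as output errors.
 */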

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
        uint8_t enaddr[ETHER_ADDR_LEN];
        uint32_t maclo, machi;
        struct mii_data * const mii = &sc->sc_mii;
        struct ifnet * const ifp = &sc->sc_ec.ec_if;
        prop_dictionary_t dict;
        int s;

        mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
        sc->sc_mii_clk = mii_clk & 7;

        dict = device_properties(sc->sc_dev);
        prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
        if (ea != NULL) {
                /*
                 * If the MAC address is overridden by a device property,
                 * use that.
                 */
                KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
                KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
                memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
        } else {
                /*
                 * If we did not get an externally configured address,
                 * try to read one from the current filter setup,
                 * before resetting the chip.
                 */
                maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_ADDR0LO);
                machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_ADDR0HI);

                if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
                        /*
                         * The filter registers still hold their reset
                         * value, so fake a locally administered MAC
                         * address: first octet 0xf2 has the locally
                         * administered bit set and the multicast bit
                         * clear.
                         */
                        maclo = 0x00f2 | (cprng_strong32() << 16);
                        machi = cprng_strong32();
                }

                enaddr[0] = maclo & 0x0ff;
                enaddr[1] = (maclo >> 8) & 0x0ff;
                enaddr[2] = (maclo >> 16) & 0x0ff;
                enaddr[3] = (maclo >> 24) & 0x0ff;
                enaddr[4] = machi & 0x0ff;
                enaddr[5] = (machi >> 8) & 0x0ff;
        }

        /*
         * Init chip and do initial setup
         */
        if (dwc_gmac_reset(sc) != 0)
                return; /* not much to clean up, haven't attached yet */
        dwc_gmac_write_hwaddr(sc, enaddr);
        aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
            ether_sprintf(enaddr));

        /*
         * Allocate Tx and Rx rings
         */
        if (dwc_gmac_alloc_dma_rings(sc) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
                goto fail;
        }

        if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
                goto fail;
        }

        mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
        if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
                aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
                goto fail;
        }

        /*
         * Prepare interface data
         */
        ifp->if_softc = sc;
        strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = dwc_gmac_ioctl;
        ifp->if_start = dwc_gmac_start;
        ifp->if_init = dwc_gmac_init;
        ifp->if_stop = dwc_gmac_stop;
        IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Attach MII subdevices
         */
        sc->sc_ec.ec_mii = &sc->sc_mii;
        ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
        mii->mii_ifp = ifp;
        mii->mii_readreg = dwc_gmac_miibus_read_reg;
        mii->mii_writereg = dwc_gmac_miibus_write_reg;
        mii->mii_statchg = dwc_gmac_miibus_statchg;
        mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

        if (LIST_EMPTY(&mii->mii_phys)) {
                aprint_error_dev(sc->sc_dev, "no PHY found!\n");
                ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
                ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
        } else {
                ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
        }

        /*
         * Ready, attach interface
         */
        if_attach(ifp);
        ether_ifattach(ifp, enaddr);
        ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

        /*
         * Enable interrupts
         */
        s = splnet();
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
            AWIN_DEF_MAC_INTRMASK);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
            GMAC_DEF_DMA_INT_MASK);
        splx(s);

        return;

fail:
        dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
        dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

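/*
 * Reset the DMA engine: set the softreset bit in the bus-mode register
 * and poll until the device clears it again.  The loop below gives the
 * core up to 3000 * 10us = 30ms before giving up.
 */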
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
        size_t cnt;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
            | GMAC_BUSMODE_RESET);
        for (cnt = 0; cnt < 3000; cnt++) {
                if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
                        return 0;
                delay(10);
        }

        aprint_error_dev(sc->sc_dev, "reset timed out\n");
        return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
        uint32_t lo, hi;

        lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
            | (enaddr[3] << 24);
        hi = enaddr[4] | (enaddr[5] << 8);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

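/*
 * MDIO access works by writing the PHY address, register number and
 * clock divider to the MII address register with GMAC_MII_BUSY set
 * (plus GMAC_MII_WRITE and the data register for writes), then busy-
 * waiting until the device clears the busy bit.  Both helpers below
 * serialize on sc_mdio_lock and poll for up to 1000 * 10us = 10ms.
 */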
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
        struct dwc_gmac_softc * const sc = device_private(self);
        uint16_t mii;
        size_t cnt;
        int rv = 0;

        mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
            | __SHIFTIN(reg,GMAC_MII_REG_MASK)
            | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
            | GMAC_MII_BUSY;

        mutex_enter(&sc->sc_mdio_lock);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
                        rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                            AWIN_GMAC_MAC_MIIDATA);
                        break;
                }
                delay(10);
        }

        mutex_exit(&sc->sc_mdio_lock);

        return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
        struct dwc_gmac_softc * const sc = device_private(self);
        uint16_t mii;
        size_t cnt;

        mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
            | __SHIFTIN(reg,GMAC_MII_REG_MASK)
            | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
            | GMAC_MII_BUSY | GMAC_MII_WRITE;

        mutex_enter(&sc->sc_mdio_lock);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

        for (cnt = 0; cnt < 1000; cnt++) {
                if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
                        break;
                delay(10);
        }

        mutex_exit(&sc->sc_mdio_lock);
}

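/*
 * The RX ring is set up in "chained" descriptor mode: each descriptor's
 * ddesc_next field holds the bus address of the following descriptor,
 * with the last one pointing back at the first, and every descriptor is
 * handed to the device (DDESC_STATUS_OWNEDBYDEV) with an MCLBYTES-sized
 * mbuf cluster as its receive buffer.
 */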
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_rx_data *data;
        bus_addr_t physaddr;
        const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
        int error, i, next;

        ring->r_cur = ring->r_next = 0;
        memset(ring->r_desc, 0, descsize);

        /*
         * Pre-allocate Rx buffers and populate Rx ring.
         */
        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc;

                data = &sc->sc_rxq.r_data[i];

                MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
                if (data->rd_m == NULL) {
                        aprint_error_dev(sc->sc_dev,
                            "could not allocate rx mbuf #%d\n", i);
                        error = ENOMEM;
                        goto fail;
                }
                error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not create DMA map\n");
                        data->rd_map = NULL;
                        goto fail;
                }
                MCLGET(data->rd_m, M_DONTWAIT);
                if (!(data->rd_m->m_flags & M_EXT)) {
                        aprint_error_dev(sc->sc_dev,
                            "could not allocate mbuf cluster #%d\n", i);
                        error = ENOMEM;
                        goto fail;
                }

                error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
                    mtod(data->rd_m, void *), MCLBYTES, NULL,
                    BUS_DMA_READ | BUS_DMA_NOWAIT);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not load rx buf DMA map #%d\n", i);
                        goto fail;
                }
                physaddr = data->rd_map->dm_segs[0].ds_addr;

                desc = &sc->sc_rxq.r_desc[i];
                desc->ddesc_data = htole32(physaddr);
                next = RX_NEXT(i);
                desc->ddesc_next = htole32(ring->r_physaddr
                    + next * sizeof(*desc));
                desc->ddesc_cntl = htole32(
                    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
                    DDESC_CNTL_RXCHAIN);
                desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            ring->r_physaddr);

        return 0;

fail:
        dwc_gmac_free_rx_ring(sc, ring);
        return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_dev_dmadesc *desc;
        int i;

        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                desc = &sc->sc_rxq.r_desc[i];
                desc->ddesc_cntl = htole32(
                    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
                    DDESC_CNTL_RXCHAIN);
                desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        ring->r_cur = ring->r_next = 0;
        /* reset DMA address to start of ring */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            sc->sc_rxq.r_physaddr);
}

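/*
 * All descriptors for both rings live in one contiguous DMA allocation,
 * AWGE_TOTAL_RING_COUNT entries long, laid out as (a sketch):
 *
 *      sc_dma_ring_map:  [ RX desc 0 .. AWGE_RX_RING_COUNT-1 |
 *                          TX desc 0 .. AWGE_TX_RING_COUNT-1 ]
 *
 * which is why TX_DESC_OFFSET() biases its index by AWGE_RX_RING_COUNT.
 */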
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
        const size_t descsize = AWGE_TOTAL_RING_COUNT *
            sizeof(struct dwc_gmac_dev_dmadesc);
        int error, nsegs;
        void *rings;

        error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
            BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not create desc DMA map\n");
                sc->sc_dma_ring_map = NULL;
                goto fail;
        }

        error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
            &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not allocate DMA memory\n");
                goto fail;
        }

        error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
            descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not map DMA memory\n");
                goto fail;
        }

        error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
            descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not load desc DMA map\n");
                goto fail;
        }

        /* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
        sc->sc_rxq.r_desc = rings;
        sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

        /* and the remaining ones to the TX side */
        sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
        sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
            AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

        return 0;

fail:
        dwc_gmac_free_dma_rings(sc);
        return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
            sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
        bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
            AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
        bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
        struct dwc_gmac_rx_data *data;
        int i;

        if (ring->r_desc == NULL)
                return;

        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                data = &ring->r_data[i];

                if (data->rd_map != NULL) {
                        /* sync the buffer map, not the descriptor ring */
                        bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                            data->rd_map->dm_mapsize,
                            BUS_DMASYNC_POSTREAD);
                        bus_dmamap_unload(sc->sc_dmat, data->rd_map);
                        bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
                }
                if (data->rd_m != NULL)
                        m_freem(data->rd_m);
        }
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i, error = 0;

        ring->t_queued = 0;
        ring->t_cur = ring->t_next = 0;

        memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(0),
            AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_POSTWRITE);

        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    AWGE_TX_RING_COUNT, MCLBYTES, 0,
                    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
                    &ring->t_data[i].td_map);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not create TX DMA map #%d\n", i);
                        ring->t_data[i].td_map = NULL;
                        goto fail;
                }
                ring->t_desc[i].ddesc_next = htole32(
                    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
                    *TX_NEXT(i));
        }

        return 0;

fail:
        dwc_gmac_free_tx_ring(sc, ring);
        return error;
}

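/*
 * Sync the TX descriptors from 'start' up to (but not including) 'end'.
 * Since the indices wrap around, a range with end <= start has to be
 * split into two bus_dmamap_sync() calls: start..end of ring, then
 * start of ring..end.
 */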
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
        /* 'end' is pointing one descriptor beyond the last we want to sync */
        if (end > start) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    TX_DESC_OFFSET(start),
                    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
                    ops);
                return;
        }
        /* sync from 'start' to end of ring */
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(start),
            TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
            ops);
        /* sync from start of ring to 'end' */
        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(0),
            TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
            ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i;

        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                if (data->td_m != NULL) {
                        bus_dmamap_sync(sc->sc_dmat, data->td_active,
                            0, data->td_active->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, data->td_active);
                        m_freem(data->td_m);
                        data->td_m = NULL;
                }
        }

        bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
            TX_DESC_OFFSET(0),
            AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
            sc->sc_txq.t_physaddr);

        ring->t_queued = 0;
        ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
        int i;

        /* unload the maps */
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                if (data->td_m != NULL) {
                        bus_dmamap_sync(sc->sc_dmat, data->td_active,
                            0, data->td_active->dm_mapsize,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->sc_dmat, data->td_active);
                        m_freem(data->td_m);
                        data->td_m = NULL;
                }
        }

        /* and actually free them */
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_tx_data *data = &ring->t_data[i];

                bus_dmamap_destroy(sc->sc_dmat, data->td_map);
        }
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
        struct dwc_gmac_softc * const sc = ifp->if_softc;
        struct mii_data * const mii = &sc->sc_mii;
        uint32_t conf;

        /*
         * Set MII or GMII interface based on the speed
         * negotiated by the PHY.
         */
        conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
        conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
            |AWIN_GMAC_MAC_CONF_FULLDPLX);
        conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
            | AWIN_GMAC_MAC_CONF_DISABLERXOWN
            | AWIN_GMAC_MAC_CONF_RXENABLE
            | AWIN_GMAC_MAC_CONF_TXENABLE;
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
                conf |= AWIN_GMAC_MAC_CONF_MIISEL;
                break;
        case IFM_100_TX:
                conf |= AWIN_GMAC_MAC_CONF_FES100 |
                    AWIN_GMAC_MAC_CONF_MIISEL;
                break;
        case IFM_1000_T:
                break;
        }
        if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
                conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "setting MAC conf register: %08x\n", conf);
#endif

        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        uint32_t ffilt;

        if (ifp->if_flags & IFF_RUNNING)
                return 0;

        dwc_gmac_stop(ifp, 0);

        /*
         * Configure DMA burst/transfer mode and RX/TX priorities.
         * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
         */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
            GMAC_BUSMODE_FIXEDBURST |
            __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
            __SHIFTIN(8, GMCA_BUSMODE_PBL));

        /*
         * Set up address filter
         */
        ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
        if (ifp->if_flags & IFF_PROMISC) {
                ffilt |= AWIN_GMAC_MAC_FFILT_PR;
        } else {
                ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
        }
        if (ifp->if_flags & IFF_BROADCAST) {
                ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
        } else {
                ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
        }
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

        /*
         * Set up multicast filter
         */
        dwc_gmac_setmulti(sc);

        /*
         * Set up DMA pointers for the RX and TX rings
         */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
            sc->sc_rxq.r_physaddr);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
            sc->sc_txq.t_physaddr);

        /*
         * Start RX/TX part
         */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
            GMAC_DMA_OP_STOREFORWARD);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int old = sc->sc_txq.t_queued;
        int start = sc->sc_txq.t_cur;
        struct mbuf *m0;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        for (;;) {
                IFQ_POLL(&ifp->if_snd, m0);
                if (m0 == NULL)
                        break;
                if (dwc_gmac_queue(sc, m0) != 0) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
                IFQ_DEQUEUE(&ifp->if_snd, m0);
                bpf_mtap(ifp, m0);
        }

        if (sc->sc_txq.t_queued != old) {
                /* packets have been queued, kick it off */
                dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
                dwc_dump_status(sc);
#endif
        }
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                AWIN_GMAC_DMA_OPMODE)
            & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
        bus_space_write_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_OPMODE,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

        mii_down(&sc->sc_mii);
        dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
        dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
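/*
 * Descriptor ownership protocol, as used below: every descriptor of the
 * chain is filled in and handed over via DDESC_STATUS_OWNEDBYDEV, except
 * the first one, which is only passed to the device after all others are
 * set up.  This keeps the device from starting on a half-built chain.
 */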
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
        struct dwc_gmac_dev_dmadesc *desc = NULL;
        struct dwc_gmac_tx_data *data = NULL;
        bus_dmamap_t map;
        uint32_t flags, len;
        int error, i, first;

#ifdef DWC_GMAC_DEBUG
        aprint_normal_dev(sc->sc_dev,
            "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

        first = sc->sc_txq.t_cur;
        map = sc->sc_txq.t_data[first].td_map;
        flags = 0;

        error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
            BUS_DMA_WRITE|BUS_DMA_NOWAIT);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev, "could not map mbuf "
                    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
                return error;
        }

        if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
                bus_dmamap_unload(sc->sc_dmat, map);
                return ENOBUFS;
        }

        data = NULL;
        flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
        for (i = 0; i < map->dm_nsegs; i++) {
                data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
                desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

                desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
                len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
                if (i == map->dm_nsegs-1)
                        flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
                    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
                    (unsigned long)map->dm_segs[i].ds_addr,
                    (unsigned long)map->dm_segs[i].ds_len,
                    flags, len);
#endif

                desc->ddesc_cntl = htole32(len|flags);
                flags &= ~DDESC_CNTL_TXFIRST;

                /*
                 * Defer passing ownership of the first descriptor
                 * until we are done.
                 */
                if (i)
                        desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

                sc->sc_txq.t_queued++;
                sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
        }

        /* Pass the first descriptor to the device */
        sc->sc_txq.t_desc[first].ddesc_status
            = htole32(DDESC_STATUS_OWNEDBYDEV);

        data->td_m = m0;
        data->td_active = map;

        bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
            BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

        return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
        struct ifnet *ifp = &ec->ec_if;
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int change = ifp->if_flags ^ sc->sc_if_flags;

        if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
                return ENETRESET;
        if ((change & IFF_PROMISC) != 0)
                dwc_gmac_setmulti(sc);
        return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
        struct dwc_gmac_softc *sc = ifp->if_softc;
        int s, error = 0;

        s = splnet();

        if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
                error = 0;
                if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
                        ;
                else if (ifp->if_flags & IFF_RUNNING) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        dwc_gmac_setmulti(sc);
                }
        }

        /* Try to get things going again */
        if (ifp->if_flags & IFF_UP)
                dwc_gmac_start(ifp);
        sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
        splx(s);
        return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
        struct dwc_gmac_tx_data *data;
        struct dwc_gmac_dev_dmadesc *desc;
        uint32_t flags;
        int i;

        for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
            i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
                    i, sc->sc_txq.t_queued);
#endif

                desc = &sc->sc_txq.t_desc[i];
                dwc_gmac_txdesc_sync(sc, i, i+1,
                    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
                flags = le32toh(desc->ddesc_status);

                if (flags & DDESC_STATUS_OWNEDBYDEV)
                        break;

                data = &sc->sc_txq.t_data[i];
                if (data->td_m == NULL)
                        continue;
                sc->sc_ec.ec_if.if_opackets++;
                bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
                    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "dwc_gmac_tx_intr: done with packet at desc #%d, "
                    "freeing mbuf %p\n", i, data->td_m);
#endif

                m_freem(data->td_m);
                data->td_m = NULL;
        }

        sc->sc_txq.t_next = i;

        if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
                sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
        }
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
        struct ifnet *ifp = &sc->sc_ec.ec_if;
        struct dwc_gmac_dev_dmadesc *desc;
        struct dwc_gmac_rx_data *data;
        bus_addr_t physaddr;
        uint32_t status;
        struct mbuf *m, *mnew;
        int i, len, error;

        for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    RX_DESC_OFFSET(i), sizeof(*desc),
                    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
                desc = &sc->sc_rxq.r_desc[i];
                data = &sc->sc_rxq.r_data[i];

                status = le32toh(desc->ddesc_status);
                if (status & DDESC_STATUS_OWNEDBYDEV)
                        break;

                if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
                        aprint_normal_dev(sc->sc_dev,
                            "RX error: descriptor status %08x, skipping\n",
                            status);
#endif
                        ifp->if_ierrors++;
                        goto skip;
                }

                len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
                aprint_normal_dev(sc->sc_dev,
                    "rx int: device is done with descriptor #%d, len: %d\n",
                    i, len);
#endif

                /*
                 * Try to get a new mbuf before passing this one
                 * up; if that fails, drop the packet and reuse
                 * the existing one.
                 */
                MGETHDR(mnew, M_DONTWAIT, MT_DATA);
                if (mnew == NULL) {
                        ifp->if_ierrors++;
                        goto skip;
                }
                MCLGET(mnew, M_DONTWAIT);
                if ((mnew->m_flags & M_EXT) == 0) {
                        m_freem(mnew);
                        ifp->if_ierrors++;
                        goto skip;
                }

                /* unload old DMA map */
                bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->sc_dmat, data->rd_map);

                /* and reload with new mbuf */
                error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
                    mtod(mnew, void*), MCLBYTES, NULL,
                    BUS_DMA_READ | BUS_DMA_NOWAIT);
                if (error != 0) {
                        m_freem(mnew);
                        /* try to reload old mbuf */
                        error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
                            mtod(data->rd_m, void*), MCLBYTES, NULL,
                            BUS_DMA_READ | BUS_DMA_NOWAIT);
                        if (error != 0) {
                                panic("%s: could not load old rx mbuf",
                                    device_xname(sc->sc_dev));
                        }
                        ifp->if_ierrors++;
                        goto skip;
                }
                physaddr = data->rd_map->dm_segs[0].ds_addr;

                /*
                 * New mbuf loaded, update RX ring and continue
                 */
                m = data->rd_m;
                data->rd_m = mnew;
                desc->ddesc_data = htole32(physaddr);

                /* finalize mbuf */
                m->m_pkthdr.len = m->m_len = len;
                m->m_pkthdr.rcvif = ifp;
                m->m_flags |= M_HASFCS;

                bpf_mtap(ifp, m);
                ifp->if_ipackets++;
                (*ifp->if_input)(ifp, m);

skip:
                bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
                    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
                desc->ddesc_cntl = htole32(
                    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
                    DDESC_CNTL_RXCHAIN);
                desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
                bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
                    RX_DESC_OFFSET(i), sizeof(*desc),
                    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
        }

        /* update RX pointer */
        sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
        x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
        x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
        x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
        x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

        return (x >> 16) | (x << 16);
}

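/*
 * A sketch of the hash filter computation used below: the GMAC hashes a
 * multicast address by bit-reversing the one's complement of its little-
 * endian CRC32 and keeping the top 6 bits.  Bit (h & 0x1f) of hash word
 * (h >> 5) then selects HTLOW (word 0) or HTHIGH (word 1); for example,
 * h == 33 sets bit 1 of AWIN_GMAC_MAC_HTHIGH.
 */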
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
        struct ifnet * const ifp = &sc->sc_ec.ec_if;
        struct ether_multi *enm;
        struct ether_multistep step;
        uint32_t hashes[2] = { 0, 0 };
        uint32_t ffilt, h;
        int mcnt, s;

        s = splnet();

        ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

        if (ifp->if_flags & IFF_PROMISC) {
                ffilt |= AWIN_GMAC_MAC_FFILT_PR;
                goto special_filter;
        }

        ifp->if_flags &= ~IFF_ALLMULTI;
        ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

        ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
        mcnt = 0;
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                    ETHER_ADDR_LEN) != 0) {
                        ffilt |= AWIN_GMAC_MAC_FFILT_PM;
                        ifp->if_flags |= IFF_ALLMULTI;
                        goto special_filter;
                }

                h = bitrev32(
                    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
                    ) >> 26;
                hashes[h >> 5] |= (1 << (h & 0x1f));

                mcnt++;
                ETHER_NEXT_MULTI(step, enm);
        }

        if (mcnt)
                ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
        else
                ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
            hashes[0]);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
            hashes[1]);
        sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

        splx(s);

#ifdef DWC_GMAC_DEBUG
        dwc_gmac_dump_ffilt(sc, ffilt);
#endif
        return;

special_filter:
#ifdef DWC_GMAC_DEBUG
        dwc_gmac_dump_ffilt(sc, ffilt);
#endif
        /* no MAC hashes, ALLMULTI or PROMISC */
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
            ffilt);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
            0xffffffff);
        bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
            0xffffffff);
        sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
        splx(s);
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
        uint32_t status, dma_status;
        int rv = 0;

        status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
        if (status & AWIN_GMAC_MII_IRQ) {
                (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_MII_STATUS);
                rv = 1;
                mii_pollstat(&sc->sc_mii);
        }

        dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_STATUS);

        if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
                rv = 1;

        if (dma_status & GMAC_DMA_INT_TIE)
                dwc_gmac_tx_intr(sc);

        if (dma_status & GMAC_DMA_INT_RIE)
                dwc_gmac_rx_intr(sc);

        /*
         * Check error conditions
         */
        if (dma_status & GMAC_DMA_INT_ERRORS) {
                sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
                dwc_dump_and_abort(sc, "interrupt error condition");
#endif
        }

        /* ack interrupt */
        if (dma_status)
                bus_space_write_4(sc->sc_bst, sc->sc_bsh,
                    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

        return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
        aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
        aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
        aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
        aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
        aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
        aprint_normal_dev(sc->sc_dev, "status: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
        aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
        aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
        aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
        aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
        aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
        aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
        int i;

        aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
            sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
        aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
        for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
                aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
                    "data: %08x next: %08x\n",
                    i, sc->sc_txq.t_physaddr +
                        i*sizeof(struct dwc_gmac_dev_dmadesc),
                    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
                    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
        }
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
        int i;

        aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
            sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
        aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
        for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
                struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
                aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
                    "data: %08x next: %08x\n",
                    i, sc->sc_rxq.r_physaddr +
                        i*sizeof(struct dwc_gmac_dev_dmadesc),
                    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
                    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
        }
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
        uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_MAC_INTR);
        uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
            AWIN_GMAC_DMA_STATUS);
        char buf[200];

        /* print interrupt state */
        snprintb(buf, sizeof(buf), "\177\20"
            "b\x10""NI\0"
            "b\x0f""AI\0"
            "b\x0e""ER\0"
            "b\x0d""FB\0"
            "b\x0a""ET\0"
            "b\x09""RW\0"
            "b\x08""RS\0"
            "b\x07""RU\0"
            "b\x06""RI\0"
            "b\x05""UN\0"
            "b\x04""OV\0"
            "b\x03""TJ\0"
            "b\x02""TU\0"
            "b\x01""TS\0"
            "b\x00""TI\0"
            "\0", dma_status);
        aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
            status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
        dwc_dump_status(sc);
        dwc_gmac_dump_ffilt(sc,
            bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
        dwc_gmac_dump_dma(sc);
        dwc_gmac_dump_tx_desc(sc);
        dwc_gmac_dump_rx_desc(sc);

        panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
        char buf[200];

        /* print filter setup */
        snprintb(buf, sizeof(buf), "\177\20"
            "b\x1f""RA\0"
            "b\x0a""HPF\0"
            "b\x09""SAF\0"
            "b\x08""SAIF\0"
            "b\x05""DBF\0"
            "b\x04""PM\0"
            "b\x03""DAIF\0"
            "b\x02""HMC\0"
            "b\x01""HUC\0"
            "b\x00""PR\0"
            "\0", ffilt);
        aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif