/* $NetBSD: dwc_gmac.c,v 1.22 2014/10/26 17:39:16 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.22 2014/10/26 17:39:16 martin Exp $");

/* #define DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
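
/*
 * All descriptors live in a single DMA allocation: the
 * AWGE_RX_RING_COUNT RX descriptors come first, immediately followed
 * by the AWGE_TX_RING_COUNT TX descriptors (see
 * dwc_gmac_alloc_dma_rings below), which is why TX_DESC_OFFSET()
 * skips the RX part. The *_NEXT() macros wrap around their ring and
 * rely on the ring sizes being powers of two.
 */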

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

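/*
 * Soft-reset the core: set the reset bit in the DMA bus mode register
 * and poll until the chip clears it again; at 3000 iterations of a
 * 10us delay this gives up after roughly 30ms.
 */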
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

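/*
 * The address filter registers take the station address in little
 * endian order: ADDR0LO holds bytes 0-3, ADDR0HI bytes 4-5, so e.g.
 * 00:11:22:33:44:55 is written as lo = 0x33221100, hi = 0x5544.
 */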
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

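/*
 * MDIO access: write the PHY number, register number and the busy bit
 * into the MII address register, then poll until the chip clears the
 * busy bit again. For writes the data register is loaded first. Both
 * directions are serialized by sc_mdio_lock.
 */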
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

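/*
 * RX descriptors are used in chain mode: each descriptor carries one
 * buffer plus an explicit link (ddesc_next) to its successor, the last
 * one pointing back to the first. A descriptor is handed to the device
 * by setting DDESC_STATUS_OWNEDBYDEV; the device clears that bit once
 * it has filled the buffer.
 */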
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

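/*
 * Example of the wrap-around case handled below (the ring size here is
 * purely illustrative): with AWGE_TX_RING_COUNT == 32, syncing
 * start=30, end=2 is split into two calls covering descriptors
 * [30,32) and [0,2).
 */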
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

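/*
 * Bring the interface up: reprogram the DMA bus mode and the frame
 * filter, reload the RX/TX descriptor base addresses and start both
 * DMA engines in store-and-forward mode.
 */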
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
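/*
 * All descriptors of the chain except the first are marked as owned by
 * the device while the chain is being built; ownership of the first
 * one is passed on only when everything else is set up, so the DMA
 * engine never sees a partially built chain.
 */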
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode. Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

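/*
 * Reclaim completed TX descriptors: walk the ring from t_next until a
 * descriptor still owned by the device is found (or the queue drains),
 * unloading the DMA maps and freeing the transmitted mbufs.
 */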
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse the order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

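/*
 * The GMAC hashes multicast addresses into a 64 bit table split over
 * the HTHIGH/HTLOW registers: the hash value is the top 6 bits of the
 * bit-reversed, inverted CRC32 (little endian) of the address. Bit 5
 * selects the register (HTLOW for bits 0-31, HTHIGH for 32-63), the
 * low 5 bits select the bit within it.
 */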
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

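/*
 * Interrupt handler: MAC events (MII/link changes) and DMA events
 * (TX/RX completion, error summary) arrive through separate status
 * registers; the DMA status bits are acknowledged by writing them
 * back.
 */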
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif