/* $NetBSD: dwc_gmac.c,v 1.26 2014/11/23 11:08:16 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * closest thing is the marketing product page:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: dwc_gmac.c,v 1.26 2014/11/23 11:08:16 martin Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
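
/*
 * The RX and TX descriptors live in one contiguous DMA allocation
 * (see dwc_gmac_alloc_dma_rings below): the first AWGE_RX_RING_COUNT
 * descriptors belong to the RX ring, the following AWGE_TX_RING_COUNT
 * to the TX ring, which is what TX_DESC_OFFSET accounts for.  The
 * *_NEXT macros advance a ring index with a mask instead of a modulo,
 * which only works because both ring counts are powers of two.
 */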

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

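
/*
 * Soft-reset the core by setting the reset bit in the DMA bus mode
 * register and polling until the hardware clears it again.  The loop
 * below allows 3000 * 10us = 30ms before giving up with EIO.
 */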
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}
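
/*
 * MDIO access: the MII address register is written with the PHY and
 * register number, the clock divider and the busy bit; the hardware
 * clears the busy bit when the cycle has completed, after which the
 * result can be fetched from the MII data register.  Both accessors
 * poll for up to 1000 * 10us = 10ms while holding sc_mdio_lock.
 *
 * Illustrative (hypothetical) direct use, reading the PHY status
 * register:
 *
 *	int bmsr = dwc_gmac_miibus_read_reg(sc->sc_dev, phy, MII_BMSR);
 *
 * In practice these are only called through the mii(4) framework via
 * mii->mii_readreg and mii->mii_writereg.
 */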
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}
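
/*
 * Set up the RX ring: one mbuf cluster and one DMA map per slot, each
 * descriptor pointing at its buffer, chained to the next descriptor
 * and handed to the device (DDESC_STATUS_OWNEDBYDEV).  On any failure
 * the partially built ring is torn down again via
 * dwc_gmac_free_rx_ring().
 */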
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}
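
/*
 * Allocate one physically contiguous, coherent chunk of memory for all
 * descriptors and split it up: the RX ring gets the first
 * AWGE_RX_RING_COUNT entries, the TX ring the rest.  This matches the
 * layout assumed by the TX_DESC_OFFSET/RX_DESC_OFFSET macros above.
 */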
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}
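
/*
 * Sync the TX descriptors in [start, end).  Because the ring wraps,
 * the range may have to be split in two.  For illustration, assuming
 * AWGE_TX_RING_COUNT were 32, a call with start == 30 and end == 2
 * would sync descriptors 30..31 with the first bus_dmamap_sync()
 * below and descriptors 0..1 with the second.
 */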
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}
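
/*
 * Callback from the MII layer after a media change: reprogram the MAC
 * configuration and flow control registers to match the newly
 * negotiated media.  MIISEL selects the 10/100 interface and FES100
 * the 100Mbit speed; 1000Mbit uses the GMII path with both bits
 * clear.
 */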
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
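/*
 * The mbuf chain is loaded into a DMA map and each segment gets its
 * own descriptor.  Note that ownership of the first descriptor is
 * only passed to the device after the whole chain has been set up,
 * so the DMA engine can never observe a half-built chain.
 */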
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}
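
/*
 * TX completion: walk the ring from t_next, reclaiming descriptors
 * the device has given back (DDESC_STATUS_OWNEDBYDEV cleared),
 * unloading the DMA maps and freeing the transmitted mbufs.  Stop at
 * the first descriptor still owned by the device.
 */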
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		/*
		 * i+1 does not need to be a valid descriptor index;
		 * this is just how we ask dwc_gmac_txdesc_sync() to
		 * sync a single TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}
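
/*
 * RX processing: for every descriptor the device has handed back, try
 * to allocate and map a replacement cluster first; only then is the
 * filled buffer passed up the stack.  If no replacement can be had,
 * the packet is dropped and the old buffer is recycled, so the ring
 * never loses a slot.
 */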
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing mbuf.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}
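
/*
 * Program the 64-bit multicast hash filter.  The hash of an address
 * is the upper 6 bits of the bit-reversed, complemented CRC32 of the
 * address; bit (h & 0x1f) of hash table word (h >> 5) is then set,
 * mirroring the loop below:
 *
 *	h = bitrev32(~ether_crc32_le(addr, ETHER_ADDR_LEN)) >> 26;
 *	hashes[h >> 5] |= 1 << (h & 0x1f);
 *
 * Address ranges (enm_addrlo != enm_addrhi) cannot be represented in
 * the hash, so those fall back to all-multicast mode.
 */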
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}
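
/*
 * Interrupt handler, called from the bus front end.  Returns nonzero
 * if the interrupt was ours: MII/link events are forwarded to the MII
 * layer, TX/RX completions to the respective handlers, and the
 * handled DMA status bits are acknowledged by writing them back.
 */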
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif