/* $NetBSD: dwc_gmac.c,v 1.43 2017/11/16 03:07:17 ozaki-r Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.43 2017/11/16 03:07:17 ozaki-r Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
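/*
 * Note: the TX_NEXT/RX_NEXT wrap-around masks only work because
 * AWGE_TX_RING_COUNT and AWGE_RX_RING_COUNT are powers of two.
 */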
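/*
 * GMAC_DEF_DMA_INT_MASK enables the normal TX/RX completion interrupts
 * plus the summary (NIE/AIE) and fatal-error bits; GMAC_DMA_INT_ERRORS
 * collects the abnormal conditions that dwc_gmac_intr() accounts as
 * output errors.
 */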
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE 1
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
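			/*
			 * 0xf2 in the lowest octet yields a locally
			 * administered unicast address.
			 */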
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_extflags = IFEF_MPSAFE;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
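	/* Poll for the self-clearing reset bit; give up after ~30ms. */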
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

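	/* The hardware clears GMAC_MII_BUSY when the MDIO cycle completes. */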
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

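		/*
		 * Chained descriptor layout: ddesc_next points at the
		 * following descriptor, and DDESC_CNTL_RXCHAIN tells the
		 * DMA engine to follow that pointer instead of treating
		 * the field as a second buffer address.
		 */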
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
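		/* Pause time (in slot times) used for transmitted PAUSE frames. */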
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start the RX/TX parts, in store-and-forward mode
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	KASSERT(if_is_mpsafe(ifp));

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

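	/* Stop both DMA engines, then flush the TX FIFO. */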
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

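	/* One ring descriptor is consumed per DMA segment; ensure they fit. */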
	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

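	/*
	 * With DWCGMAC_MPSAFE the splnet/splx pair only brackets
	 * ether_ioctl(); otherwise it covers the whole handler.
	 */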
	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor;
		 * this is just a special case to sync a single
		 * TX descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

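		/*
		 * Only the final descriptor of a packet carries the mbuf;
		 * descriptors for intermediate segments are skipped here.
		 */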
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up; if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
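		/* M_HASFCS: the FCS is still attached; the stack trims it. */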
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

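		/*
		 * The hash filter indexes a 64-bit table (HTHIGH:HTLOW)
		 * with the upper 6 bits of the bit-reversed CRC-32 of
		 * the multicast address.
		 */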
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
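		/* Read (and discard) the MII status to ack the interrupt. */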
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif