/* $NetBSD: dwc_gmac.c,v 1.45 2017/12/21 12:09:43 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 cores and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product documentation can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.45 2017/12/21 12:09:43 martin Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static int dwc_gmac_init_locked(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static void dwc_gmac_start_locked(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
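
/*
 * Example (a sketch, assuming the ring sizes are powers of two, which
 * the masking in TX_NEXT/RX_NEXT requires): with a TX ring of 32
 * descriptors, TX_NEXT(31) wraps back to 0, and TX_DESC_OFFSET(0)
 * points just past the RX descriptors, because both rings live in a
 * single DMA allocation with the RX descriptors first.
 */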

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif
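
/*
 * Expected usage, as a sketch: a bus front end (for example the
 * Allwinner attachment) maps the device registers, fills in sc_dev,
 * sc_bst, sc_bsh and sc_dmat, establishes the interrupt with
 * dwc_gmac_intr() as the handler, and then calls dwc_gmac_attach()
 * with the MII clock divider appropriate for its bus frequency.
 */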

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
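	/* The reset bit self-clears; poll for up to ~30ms (3000 * 10us). */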
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
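	/*
	 * GMAC_MII_BUSY is set by us and cleared by the hardware when
	 * the MDIO transaction completes; poll for up to ~10ms.
	 */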
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}
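	/*
	 * The descriptors now form a circular chain: each ddesc_next
	 * holds the physical address of the following descriptor, and
	 * RX_NEXT() wraps the last entry back to descriptor 0.
	 * DDESC_CNTL_RXCHAIN marks them as chained descriptors (second
	 * address used as next pointer) rather than an implicit ring.
	 */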

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
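	/*
	 * Resulting layout of the single descriptor allocation (sketch):
	 *
	 *  r_physaddr                          t_physaddr
	 *  |                                   |
	 *  v                                   v
	 *  +-----------------------------------+--------------------------+
	 *  | AWGE_RX_RING_COUNT rx descriptors | AWGE_TX_RING_COUNT tx d. |
	 *  +-----------------------------------+--------------------------+
	 */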

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			/* sync the buffer map itself, not the desc ring */
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
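	/*
	 * Wrapped case, e.g. start == AWGE_TX_RING_COUNT - 2 and
	 * end == 1: sync the tail of the ring first, then the head,
	 * as two separate operations.
	 */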
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
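	/*
	 * The pause time (0x200 below) is assumed to be in the usual
	 * 802.3 units of 512 bit times; flow control is only
	 * configured on full duplex links.
	 */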

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));
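	/*
	 * With GMAC_BUSMODE_4PBL set the programmed burst lengths are
	 * multiplied by four, so PBL/RPBL values of 2 should yield
	 * bursts of up to 8 beats (an assumption based on the usual
	 * DesignWare busmode encoding).
	 */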

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD);

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len, status;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	status = 0;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		desc->ddesc_status = htole32(status);
		status |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	desc->ddesc_cntl |= htole32(DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status =
	    htole32(DDESC_STATUS_OWNEDBYDEV);
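	/*
	 * Note: the caller (dwc_gmac_start_locked()) syncs the
	 * descriptor range and writes AWIN_GMAC_DMA_TXPOLL afterwards
	 * to make the device pick up the new chain.
	 */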

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t status;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 need not be a valid descriptor index; it is just
		 * a convenient way to sync a single tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}
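
/*
 * For example, bitrev32(0x00000001) == 0x80000000.  This is used below
 * to adapt the bit order of the ether_crc32_le() output to the order
 * the GMAC uses when indexing its multicast hash table.
 */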

static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));
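		/*
		 * h is the top 6 bits of the bit-reversed CRC: bit 5
		 * selects between the HTLOW and HTHIGH registers and
		 * the low 5 bits select a bit within that 32 bit word.
		 */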

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* Ack the interrupt; the DMA status bits are write-one-to-clear. */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif