/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.15 2014/10/20 19:36:03 martin Exp $");

/* #define DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
#endif

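/*
 * Attach the core: establish the MAC address, reset the chip, set up
 * the DMA rings, attach the MII/PHY layer and register the network
 * interface. Called by the bus front end with the bus space and DMA
 * tags already filled in; mii_clk selects the MDIO clock divider.
 */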
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* an all-ones filter means an unprogrammed chip */
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Initialize the chip and do the initial setup.
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

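/*
 * Perform a software reset of the core: set the reset bit in the DMA
 * bus-mode register and poll (up to 3000 * 10us = 30ms) for the
 * hardware to clear it again.
 */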
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

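/*
 * Program the given Ethernet address into the ADDR0 filter registers:
 * low four bytes into ADDR0LO, high two bytes into ADDR0HI.
 */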
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

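/*
 * MII read access: program the PHY, register and clock-divider fields
 * plus the busy flag into the MII address register, then poll until
 * the hardware clears the busy flag and fetch the result from the
 * data register. Serialized against writes by sc_mdio_lock.
 */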
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

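/*
 * MII write access: deposit the value in the data register first,
 * then start the transaction via the address register (with the
 * write and busy flags set) and poll for completion.
 */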
static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

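/*
 * Populate the RX ring: allocate an mbuf cluster and a DMA map for
 * each slot, chain the descriptors together, hand them all to the
 * device and point the DMA engine at the start of the ring.
 */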
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN | DDESC_CNTL_RXINT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

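/*
 * Return all RX descriptors to their initial state and hand them back
 * to the device; used when the interface is stopped or reinitialized.
 */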
static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

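/*
 * Allocate one contiguous, coherent DMA area holding the RX
 * descriptors followed by the TX descriptors, and record the kernel
 * and bus addresses of both sub-arrays in the softc.
 */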
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the remaining ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

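/*
 * Set up the TX ring: create a DMA map per slot and pre-link the
 * descriptors into a circular chain; no buffers are attached until
 * packets are actually queued.
 */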
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

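/*
 * Sync the TX descriptors from 'start' up to (but not including)
 * 'end', handling the wrap-around case with two separate sync
 * operations.
 */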
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last one we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

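/*
 * Drop all packets still queued on the TX ring, reclaim their DMA
 * maps and mbufs, and reset the hardware TX descriptor pointer.
 */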
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

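/*
 * MII status-change callback: reprogram the MAC configuration
 * register to match the speed and duplex the PHY has negotiated.
 */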
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

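/*
 * if_init handler: reset DMA state, program the bus mode, the address
 * filter and the ring base addresses, then start the RX and TX DMA
 * engines in store-and-forward mode.
 */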
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = 0;
	if (ifp->if_flags & IFF_PROMISC)
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	else if (ifp->if_flags & IFF_ALLMULTI)
		ffilt |= AWIN_GMAC_MAC_FFILT_PM;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up the DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

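/*
 * if_start handler: drain the interface send queue into the TX ring
 * until the queue is empty or the ring is full, then sync the newly
 * written descriptors and poke the transmit poll-demand register.
 */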
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, sync the descriptors
		 * written above and kick the DMA engine */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

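/*
 * if_stop handler: halt the RX/TX DMA engines, flush the TX FIFO,
 * take the MII layer down and reset both rings.
 */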
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			dwc_gmac_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = dwc_gmac_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call dwc_gmac_init()
			 * which will trigger a reset.
			 */
			/* XXX - for now always init */
			error = dwc_gmac_init(ifp);
			break;
		case 0:
			break;
		}

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* XXX - multicast filter update not implemented */;
		break;
	}

	splx(s);

	return error;
}

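/*
 * TX completion interrupt: walk the ring from t_next, reclaim the
 * descriptors the device has finished with and free the associated
 * mbufs and DMA maps.
 */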
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

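/*
 * RX interrupt: pass completed packets up the stack, replacing each
 * delivered mbuf with a freshly allocated cluster; on any error the
 * old buffer is recycled and the packet is dropped.
 */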
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		/* hand the descriptor back to the device */
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

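/*
 * Main interrupt handler, to be called by the bus front end: services
 * MII, TX-complete and RX interrupts as indicated by the status
 * registers, then acknowledges the handled DMA interrupt bits.
 * Returns nonzero if the interrupt was ours.
 */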
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		    i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		    i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
#endif