/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product documents can be found here:
 *
 *   http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.13 2014/10/19 13:15:23 jmcneill Exp $");

/* #define DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
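
/*
 * Note: TX_NEXT()/RX_NEXT() advance through the rings with a simple
 * bitmask, which only works because AWGE_TX_RING_COUNT and
 * AWGE_RX_RING_COUNT are powers of two.
 */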
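/*
 * Default set of DMA interrupts to enable: TX and RX completion
 * (TIE/RIE), the normal and abnormal summary bits (NIE/AIE), fatal
 * bus errors (FBE) and TX underflow (UNE).
 */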
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
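
	/*
	 * Trigger a software reset and poll until the device clears the
	 * bit again; 3000 iterations at 10us each gives the core about
	 * 30ms to complete the reset.
	 */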
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;
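
	/*
	 * The MAC address is split across two registers: bytes 0-3 go
	 * into ADDR0LO and bytes 4-5 into the low half of ADDR0HI, each
	 * packed in little-endian order.
	 */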
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;
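
	/*
	 * MDIO access protocol: write the PHY address, register number
	 * and clock selection together with GMAC_MII_BUSY to MIIADDR,
	 * then poll (up to 1000 * 10us = 10ms) until the device clears
	 * the busy bit; the result is then available in MIIDATA.
	 */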
	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);
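
	/*
	 * The RX descriptors are used in chain mode (DDESC_CNTL_RXCHAIN):
	 * each descriptor's ddesc_next holds the physical address of the
	 * following descriptor, and the last one wraps back to the start
	 * of the ring via RX_NEXT().
	 */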

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN | DDESC_CNTL_RXINT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;
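
	/*
	 * Both rings live in one contiguous DMA allocation:
	 * AWGE_RX_RING_COUNT RX descriptors followed by
	 * AWGE_TX_RING_COUNT TX descriptors.  TX_DESC_OFFSET()
	 * accounts for the RX part when addressing TX descriptors.
	 */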
	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give the first AWGE_RX_RING_COUNT descriptors to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and the following ones to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' points one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = 0;
	if (ifp->if_flags & IFF_PROMISC)
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	else if (ifp->if_flags & IFF_ALLMULTI)
		ffilt |= AWIN_GMAC_MAC_FFILT_PM;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
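
	/*
	 * Stop both DMA engines (clear the RX/TX start bits), then
	 * flush the transmit FIFO before tearing down the rings.
	 */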
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	        & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;
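
	/*
	 * The mbuf chain is loaded into as many chained TX descriptors
	 * as it has DMA segments.  Ownership of all descriptors except
	 * the first is passed to the device immediately; the first one
	 * is handed over only after the whole chain has been set up, so
	 * the device never sees a partially built chain.
	 */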
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass the first descriptor to the device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			dwc_gmac_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = dwc_gmac_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call dwc_gmac_init()
			 * which will trigger a reset.
			 */
			/* XXX - for now always init */
			error = dwc_gmac_init(ifp);
			break;
		case 0:
			break;
		}

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;
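
	/*
	 * Walk the ring from t_next, reclaiming descriptors the device
	 * has handed back (DDESC_STATUS_OWNEDBYDEV cleared), unloading
	 * the DMA maps and freeing the transmitted mbufs.
	 */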
	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;
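
	/*
	 * Process received frames from r_cur until we reach a descriptor
	 * the device still owns.  Each completed buffer is replaced by a
	 * freshly allocated mbuf cluster before the packet is passed up,
	 * so the ring always stays fully populated.
	 */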
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {

#ifdef DWC_GMAC_DEBUG
		printf("rx int: checking desc #%d\n", i);
#endif

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV) {
#ifdef DWC_GMAC_DEBUG
			printf("status %08x, still owned by device\n", status);
#endif
			break;
		}

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			printf("status %08x, RX error, skipping\n", status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		printf("rx int: device is done with #%d, len: %d\n", i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;
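
	/*
	 * MAC and DMA interrupts are reported through separate status
	 * registers: check for MII/link events first, then dispatch TX
	 * and RX completions, and finally acknowledge whatever DMA
	 * status bits were set.
	 */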
	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
#endif