/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.16 2014/10/20 19:51:40 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

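/*
 * The RX and TX descriptor rings live in one contiguous DMA allocation,
 * RX descriptors first (see dwc_gmac_alloc_dma_rings), so TX offsets are
 * biased by AWGE_RX_RING_COUNT.  The *_NEXT macros assume the ring sizes
 * are powers of two.
 */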
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))

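/*
 * DMA interrupts enabled by default: TX/RX completion, the normal and
 * abnormal summary bits, fatal bus error and TX underflow.
 */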
#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
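	/* only the low three bits are used (the GMAC_MII_CLKMASK divider field) */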
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't read MAC address\n");
			return;
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to clean up, we have not attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
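	/*
	 * Set the software-reset bit and poll until the core clears it,
	 * giving up after ~30ms (3000 * 10us).
	 */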
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_BUSMODE) & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

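	/*
	 * Wait (up to ~10ms) for the busy bit to clear; the data
	 * register only holds the result once it does.
	 */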
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

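	/*
	 * Write the data register first; the transfer is kicked off by
	 * the write to the address register with GMAC_MII_BUSY set.
	 */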
	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

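		/*
		 * Descriptors are chained (DDESC_CNTL_RXCHAIN), so
		 * ddesc_next points at the following descriptor rather
		 * than at a second buffer.
		 */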
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

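	/*
	 * Create one DMA map per TX slot and pre-link all descriptors
	 * into a ring via their ddesc_next pointers.
	 */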
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = 0;
	if (ifp->if_flags & IFF_PROMISC)
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	else if (ifp->if_flags & IFF_ALLMULTI)
		ffilt |= AWIN_GMAC_MAC_FFILT_PM;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

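	/*
	 * Add packets from the send queue until it is drained or the
	 * TX ring is full.
	 */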
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

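	/*
	 * Make sure the whole chain fits into the remaining descriptors
	 * (the ring is never filled completely).
	 */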
	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len, DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
850 "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
851 (unsigned long)map->dm_segs[i].ds_addr,
852 (unsigned long)map->dm_segs[i].ds_len,
853 flags, len);
854 #endif
855
856 desc->ddesc_cntl = htole32(len|flags);
857 flags &= ~DDESC_CNTL_TXFIRST;
858
859 /*
860 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			dwc_gmac_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = dwc_gmac_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call dwc_gmac_init()
			 * which will trigger a reset.
			 */
			/* XXX - for now always init */
			error = dwc_gmac_init(ifp);
			break;
		case 0:
			break;
		}

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

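	/*
	 * Reclaim descriptors the hardware has finished with, stopping
	 * at the first one still owned by the device.
	 */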
	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

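	/*
	 * Walk the ring starting at the last processed descriptor until
	 * we reach one the device still owns.
	 */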
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET, DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}
#endif