/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation seems not to be available; the marketing product
 * documentation can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.1 2014/09/08 14:24:32 martin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);


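/*
 * Both rings live in one shared DMA descriptor area (see
 * dwc_gmac_alloc_dma_rings below): the AWGE_RX_RING_COUNT Rx descriptors
 * come first, followed by the Tx descriptors.  These macros translate
 * ring indices into byte offsets within that area.
 */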
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint8_t *ep)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);

	/*
	 * If the frontend did not pass in a pre-configured ethernet mac
	 * address, try to read one from the current filter setup,
	 * before resetting the chip.
	 */
	if (ep == NULL) {
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
		ep = enaddr;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, ep);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(ep));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, ep);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
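/*
 * Soft-reset the DMA engine and wait (up to roughly 30ms) for the core
 * to clear the reset bit again; returns 0 on success, EIO on timeout.
 */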
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

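/*
 * MDIO access: PHY and register number are written to the MII address
 * register together with the BUSY bit; the hardware clears BUSY when
 * the transfer has finished, so both routines poll for that before
 * touching the MII data register.
 */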
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;
	int rv = 0;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK);

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr
	    | GMAC_MII_CLK_150_250M | GMAC_MII_BUSY);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
		    & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
		    & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

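/*
 * Allocate one mbuf cluster and one DMA map per Rx slot, point each
 * descriptor at its buffer, chain the descriptors via ddesc_next
 * (wrapping at the end of the ring) and hand them all to the device.
 */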
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize =
	    AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = i < (AWGE_RX_RING_COUNT-1) ? i+1 : 0;
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    htole32(ring->r_physaddr));

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

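/*
 * Allocate a single physically contiguous, coherent DMA area holding
 * the descriptors of both rings: the Rx descriptors start at offset 0
 * and the Tx descriptors follow directly after them.
 */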
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

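/*
 * Create one DMA map per Tx slot and pre-link the Tx descriptors into
 * a circular chain via ddesc_next; buffers are attached later, when
 * packets are queued by dwc_gmac_queue().
 */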
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *((i+1) & (AWGE_TX_RING_COUNT-1)));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

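/*
 * Sync the Tx descriptors from index 'start' up to (but not including)
 * index 'end'; if the range wraps past the end of the ring, the sync
 * is split into two pieces.
 */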
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	printf("dwc_gmac_miibus_statchg called\n");

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}

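/*
 * Interface initialization.  This is still minimal: it resets the
 * rings via dwc_gmac_stop(), programs the Rx descriptor base address
 * and marks the interface running; the Tx/Rx DMA engines themselves
 * are not (yet) started here.
 */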
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, sc->sc_rxq.r_physaddr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

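/*
 * Transmit start routine: pull packets off the if_snd queue and map
 * them onto the Tx ring until the queue is empty or the ring fills up,
 * then sync the descriptors just written and write the Tx descriptor
 * address register to kick the DMA engine.
 */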
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
		    sc->sc_txq.t_physaddr
		    + old*sizeof(struct dwc_gmac_dev_dmadesc));
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t status, flags, len;
	int error, i, first;

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_STATUS_TXINT|DDESC_STATUS_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT;
		desc->ddesc_cntl = htole32(len);
		status = flags;
		desc->ddesc_status = htole32(status);
		sc->sc_txq.t_queued++;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		flags |= DDESC_STATUS_OWNEDBYDEV;

		sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
		    & (AWGE_TX_RING_COUNT-1);
	}

	/* Fixup last */
	status = flags|DDESC_STATUS_TXLAST;
	desc->ddesc_status = htole32(status);

	/* Finalize first */
	status = flags|DDESC_STATUS_TXFIRST;
	sc->sc_txq.t_desc[first].ddesc_status = htole32(status);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	// struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

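/*
 * Interrupt handler.  For now this is only a debugging stub: it acks
 * a pending MII interrupt, dumps the MAC and DMA status registers and
 * deliberately panics after 20 interrupts so the state can be
 * inspected; Rx/Tx completions are not processed yet.
 */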
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ)
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
	    status, dma_status);

	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}