dwc_gmac.c revision 1.2 1 1.1 martin /*-
2 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
3 1.1 martin * All rights reserved.
4 1.1 martin *
5 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
6 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
7 1.1 martin *
8 1.1 martin * Redistribution and use in source and binary forms, with or without
9 1.1 martin * modification, are permitted provided that the following conditions
10 1.1 martin * are met:
11 1.1 martin * 1. Redistributions of source code must retain the above copyright
12 1.1 martin * notice, this list of conditions and the following disclaimer.
13 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer in the
15 1.1 martin * documentation and/or other materials provided with the distribution.
16 1.1 martin *
17 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
28 1.1 martin */
29 1.1 martin
30 1.1 martin /*
31 1.1 martin * This driver supports the Synopsis Designware GMAC core, as found
32 1.1 martin * on Allwinner A20 cores and others.
33 1.1 martin *
34 1.1 martin * Real documentation seems to not be available, the marketing product
35 1.1 martin * documents could be found here:
36 1.1 martin *
37 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
38 1.1 martin */
39 1.1 martin
40 1.1 martin #include <sys/cdefs.h>
41 1.1 martin
42 1.2 martin __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.2 2014/09/09 07:18:35 martin Exp $");
43 1.1 martin
44 1.1 martin #include "opt_inet.h"
45 1.1 martin
46 1.1 martin #include <sys/param.h>
47 1.1 martin #include <sys/bus.h>
48 1.1 martin #include <sys/device.h>
49 1.1 martin #include <sys/intr.h>
50 1.1 martin #include <sys/systm.h>
51 1.1 martin #include <sys/sockio.h>
52 1.1 martin
53 1.1 martin #include <net/if.h>
54 1.1 martin #include <net/if_ether.h>
55 1.1 martin #include <net/if_media.h>
56 1.1 martin #include <net/bpf.h>
57 1.1 martin #ifdef INET
58 1.1 martin #include <netinet/if_inarp.h>
59 1.1 martin #endif
60 1.1 martin
61 1.1 martin #include <dev/mii/miivar.h>
62 1.1 martin
63 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
64 1.1 martin #include <dev/ic/dwc_gmac_var.h>
65 1.1 martin
/* MII bus accessors and link-state callback */
static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

/* Chip setup */
static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
			 uint8_t enaddr[ETHER_ADDR_LEN]);

/* Descriptor ring management (one shared DMA area, RX first, then TX) */
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);

/* Interface callbacks */
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);


/*
 * Byte offsets of descriptor N inside the shared DMA area:
 * the RX descriptors come first, the TX descriptors follow them.
 */
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				*sizeof(struct dwc_gmac_dev_dmadesc))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
93 1.1 martin
94 1.1 martin void
95 1.1 martin dwc_gmac_attach(struct dwc_gmac_softc *sc, uint8_t *ep)
96 1.1 martin {
97 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN];
98 1.1 martin uint32_t maclo, machi;
99 1.1 martin struct mii_data * const mii = &sc->sc_mii;
100 1.1 martin struct ifnet * const ifp = &sc->sc_ec.ec_if;
101 1.1 martin
102 1.1 martin mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
103 1.1 martin
104 1.1 martin /*
105 1.1 martin * If the frontend did not pass in a pre-configured ethernet mac
106 1.1 martin * address, try to read on from the current filter setup,
107 1.1 martin * before resetting the chip.
108 1.1 martin */
109 1.1 martin if (ep == NULL) {
110 1.1 martin maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
111 1.1 martin machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
112 1.1 martin enaddr[0] = maclo & 0x0ff;
113 1.1 martin enaddr[1] = (maclo >> 8) & 0x0ff;
114 1.1 martin enaddr[2] = (maclo >> 16) & 0x0ff;
115 1.1 martin enaddr[3] = (maclo >> 24) & 0x0ff;
116 1.1 martin enaddr[4] = machi & 0x0ff;
117 1.1 martin enaddr[5] = (machi >> 8) & 0x0ff;
118 1.1 martin ep = enaddr;
119 1.1 martin }
120 1.1 martin
121 1.1 martin /*
122 1.1 martin * Init chip and do intial setup
123 1.1 martin */
124 1.1 martin if (dwc_gmac_reset(sc) != 0)
125 1.1 martin return; /* not much to cleanup, haven't attached yet */
126 1.1 martin dwc_gmac_write_hwaddr(sc, ep);
127 1.1 martin aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
128 1.1 martin ether_sprintf(enaddr));
129 1.1 martin
130 1.1 martin /*
131 1.1 martin * Allocate Tx and Rx rings
132 1.1 martin */
133 1.1 martin if (dwc_gmac_alloc_dma_rings(sc) != 0) {
134 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
135 1.1 martin goto fail;
136 1.1 martin }
137 1.1 martin
138 1.1 martin if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
139 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
140 1.1 martin goto fail;
141 1.1 martin }
142 1.1 martin
143 1.1 martin mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
144 1.1 martin if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
145 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
146 1.1 martin goto fail;
147 1.1 martin }
148 1.1 martin
149 1.1 martin /*
150 1.1 martin * Prepare interface data
151 1.1 martin */
152 1.1 martin ifp->if_softc = sc;
153 1.1 martin strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
154 1.1 martin ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
155 1.1 martin ifp->if_ioctl = dwc_gmac_ioctl;
156 1.1 martin ifp->if_start = dwc_gmac_start;
157 1.1 martin ifp->if_init = dwc_gmac_init;
158 1.1 martin ifp->if_stop = dwc_gmac_stop;
159 1.1 martin IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
160 1.1 martin IFQ_SET_READY(&ifp->if_snd);
161 1.1 martin
162 1.1 martin /*
163 1.1 martin * Attach MII subdevices
164 1.1 martin */
165 1.2 martin sc->sc_ec.ec_mii = &sc->sc_mii;
166 1.1 martin ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
167 1.1 martin mii->mii_ifp = ifp;
168 1.1 martin mii->mii_readreg = dwc_gmac_miibus_read_reg;
169 1.1 martin mii->mii_writereg = dwc_gmac_miibus_write_reg;
170 1.1 martin mii->mii_statchg = dwc_gmac_miibus_statchg;
171 1.1 martin mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
172 1.1 martin
173 1.1 martin if (LIST_EMPTY(&mii->mii_phys)) {
174 1.1 martin aprint_error_dev(sc->sc_dev, "no PHY found!\n");
175 1.1 martin ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
176 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
177 1.1 martin } else {
178 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
179 1.1 martin }
180 1.1 martin
181 1.1 martin /*
182 1.1 martin * Ready, attach interface
183 1.1 martin */
184 1.1 martin if_attach(ifp);
185 1.1 martin ether_ifattach(ifp, enaddr);
186 1.1 martin
187 1.1 martin /*
188 1.1 martin * Enable interrupts
189 1.1 martin */
190 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
191 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);
192 1.1 martin
193 1.1 martin return;
194 1.1 martin
195 1.1 martin fail:
196 1.1 martin dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
197 1.1 martin dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
198 1.1 martin }
199 1.1 martin
200 1.1 martin
201 1.1 martin
202 1.1 martin static int
203 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
204 1.1 martin {
205 1.1 martin size_t cnt;
206 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
207 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
208 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
209 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
210 1.1 martin & GMAC_BUSMODE_RESET) == 0)
211 1.1 martin return 0;
212 1.1 martin delay(10);
213 1.1 martin }
214 1.1 martin
215 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
216 1.1 martin return EIO;
217 1.1 martin }
218 1.1 martin
219 1.1 martin static void
220 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
221 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
222 1.1 martin {
223 1.1 martin uint32_t lo, hi;
224 1.1 martin
225 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
226 1.1 martin | (enaddr[3] << 24);
227 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
228 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
229 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
230 1.1 martin }
231 1.1 martin
232 1.1 martin static int
233 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
234 1.1 martin {
235 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
236 1.1 martin uint16_t miiaddr;
237 1.1 martin size_t cnt;
238 1.1 martin int rv = 0;
239 1.1 martin
240 1.1 martin miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
241 1.1 martin | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK);
242 1.1 martin
243 1.1 martin mutex_enter(&sc->sc_mdio_lock);
244 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr
245 1.1 martin | GMAC_MII_CLK_150_250M | GMAC_MII_BUSY);
246 1.1 martin
247 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
248 1.1 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
249 1.1 martin & GMAC_MII_BUSY)) {
250 1.1 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA);
251 1.1 martin break;
252 1.1 martin }
253 1.1 martin delay(10);
254 1.1 martin }
255 1.1 martin
256 1.1 martin mutex_exit(&sc->sc_mdio_lock);
257 1.1 martin
258 1.1 martin return rv;
259 1.1 martin }
260 1.1 martin
261 1.1 martin static void
262 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
263 1.1 martin {
264 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
265 1.1 martin uint16_t miiaddr;
266 1.1 martin size_t cnt;
267 1.1 martin
268 1.1 martin miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
269 1.1 martin | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK)
270 1.1 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
271 1.1 martin
272 1.1 martin mutex_enter(&sc->sc_mdio_lock);
273 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
274 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr);
275 1.1 martin
276 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
277 1.1 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
278 1.1 martin & GMAC_MII_BUSY))
279 1.1 martin break;
280 1.1 martin delay(10);
281 1.1 martin }
282 1.1 martin
283 1.1 martin mutex_exit(&sc->sc_mdio_lock);
284 1.1 martin }
285 1.1 martin
286 1.1 martin static int
287 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
288 1.1 martin struct dwc_gmac_rx_ring *ring)
289 1.1 martin {
290 1.1 martin struct dwc_gmac_rx_data *data;
291 1.1 martin bus_addr_t physaddr;
292 1.1 martin const size_t descsize =
293 1.1 martin AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
294 1.1 martin int error, i, next;
295 1.1 martin
296 1.1 martin ring->r_cur = ring->r_next = 0;
297 1.1 martin memset(ring->r_desc, 0, descsize);
298 1.1 martin
299 1.1 martin /*
300 1.1 martin * Pre-allocate Rx buffers and populate Rx ring.
301 1.1 martin */
302 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
303 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
304 1.1 martin
305 1.1 martin data = &sc->sc_rxq.r_data[i];
306 1.1 martin
307 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
308 1.1 martin if (data->rd_m == NULL) {
309 1.1 martin aprint_error_dev(sc->sc_dev,
310 1.1 martin "could not allocate rx mbuf #%d\n", i);
311 1.1 martin error = ENOMEM;
312 1.1 martin goto fail;
313 1.1 martin }
314 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
315 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
316 1.1 martin if (error != 0) {
317 1.1 martin aprint_error_dev(sc->sc_dev,
318 1.1 martin "could not create DMA map\n");
319 1.1 martin data->rd_map = NULL;
320 1.1 martin goto fail;
321 1.1 martin }
322 1.1 martin MCLGET(data->rd_m, M_DONTWAIT);
323 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) {
324 1.1 martin aprint_error_dev(sc->sc_dev,
325 1.1 martin "could not allocate mbuf cluster #%d\n", i);
326 1.1 martin error = ENOMEM;
327 1.1 martin goto fail;
328 1.1 martin }
329 1.1 martin
330 1.1 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
331 1.1 martin mtod(data->rd_m, void *), MCLBYTES, NULL,
332 1.1 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
333 1.1 martin if (error != 0) {
334 1.1 martin aprint_error_dev(sc->sc_dev,
335 1.1 martin "could not load rx buf DMA map #%d", i);
336 1.1 martin goto fail;
337 1.1 martin }
338 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
339 1.1 martin
340 1.1 martin desc = &sc->sc_rxq.r_desc[i];
341 1.1 martin desc->ddesc_data = htole32(physaddr);
342 1.1 martin next = i < (AWGE_RX_RING_COUNT-1) ? i+1 : 0;
343 1.1 martin desc->ddesc_next = htole32(ring->r_physaddr
344 1.1 martin + next * sizeof(*desc));
345 1.1 martin desc->ddesc_cntl = htole32(
346 1.1 martin (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
347 1.1 martin << DDESC_CNTL_SIZE1SHIFT);
348 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
349 1.1 martin }
350 1.1 martin
351 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
352 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
353 1.1 martin BUS_DMASYNC_PREREAD);
354 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
355 1.1 martin htole32(ring->r_physaddr));
356 1.1 martin
357 1.1 martin return 0;
358 1.1 martin
359 1.1 martin fail:
360 1.1 martin dwc_gmac_free_rx_ring(sc, ring);
361 1.1 martin return error;
362 1.1 martin }
363 1.1 martin
364 1.1 martin static void
365 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
366 1.1 martin struct dwc_gmac_rx_ring *ring)
367 1.1 martin {
368 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
369 1.1 martin int i;
370 1.1 martin
371 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
372 1.1 martin desc = &sc->sc_rxq.r_desc[i];
373 1.1 martin desc->ddesc_cntl = htole32(
374 1.1 martin (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
375 1.1 martin << DDESC_CNTL_SIZE1SHIFT);
376 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
377 1.1 martin }
378 1.1 martin
379 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
380 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
381 1.1 martin BUS_DMASYNC_PREWRITE);
382 1.1 martin
383 1.1 martin ring->r_cur = ring->r_next = 0;
384 1.1 martin }
385 1.1 martin
386 1.1 martin static int
387 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
388 1.1 martin {
389 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
390 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
391 1.1 martin int error, nsegs;
392 1.1 martin void *rings;
393 1.1 martin
394 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
395 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
396 1.1 martin if (error != 0) {
397 1.1 martin aprint_error_dev(sc->sc_dev,
398 1.1 martin "could not create desc DMA map\n");
399 1.1 martin sc->sc_dma_ring_map = NULL;
400 1.1 martin goto fail;
401 1.1 martin }
402 1.1 martin
403 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
404 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
405 1.1 martin if (error != 0) {
406 1.1 martin aprint_error_dev(sc->sc_dev,
407 1.1 martin "could not map DMA memory\n");
408 1.1 martin goto fail;
409 1.1 martin }
410 1.1 martin
411 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
412 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
413 1.1 martin if (error != 0) {
414 1.1 martin aprint_error_dev(sc->sc_dev,
415 1.1 martin "could not allocate DMA memory\n");
416 1.1 martin goto fail;
417 1.1 martin }
418 1.1 martin
419 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
420 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
421 1.1 martin if (error != 0) {
422 1.1 martin aprint_error_dev(sc->sc_dev,
423 1.1 martin "could not load desc DMA map\n");
424 1.1 martin goto fail;
425 1.1 martin }
426 1.1 martin
427 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
428 1.1 martin sc->sc_rxq.r_desc = rings;
429 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
430 1.1 martin
431 1.1 martin /* and next rings to the TX side */
432 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
433 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
434 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
435 1.1 martin
436 1.1 martin return 0;
437 1.1 martin
438 1.1 martin fail:
439 1.1 martin dwc_gmac_free_dma_rings(sc);
440 1.1 martin return error;
441 1.1 martin }
442 1.1 martin
443 1.1 martin static void
444 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
445 1.1 martin {
446 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
447 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
448 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
449 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
450 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
451 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
452 1.1 martin }
453 1.1 martin
454 1.1 martin static void
455 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
456 1.1 martin {
457 1.1 martin struct dwc_gmac_rx_data *data;
458 1.1 martin int i;
459 1.1 martin
460 1.1 martin if (ring->r_desc == NULL)
461 1.1 martin return;
462 1.1 martin
463 1.1 martin
464 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
465 1.1 martin data = &ring->r_data[i];
466 1.1 martin
467 1.1 martin if (data->rd_map != NULL) {
468 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
469 1.1 martin AWGE_RX_RING_COUNT
470 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
471 1.1 martin BUS_DMASYNC_POSTREAD);
472 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
473 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
474 1.1 martin }
475 1.1 martin if (data->rd_m != NULL)
476 1.1 martin m_freem(data->rd_m);
477 1.1 martin }
478 1.1 martin }
479 1.1 martin
480 1.1 martin static int
481 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
482 1.1 martin struct dwc_gmac_tx_ring *ring)
483 1.1 martin {
484 1.1 martin int i, error = 0;
485 1.1 martin
486 1.1 martin ring->t_queued = 0;
487 1.1 martin ring->t_cur = ring->t_next = 0;
488 1.1 martin
489 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
490 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
491 1.1 martin TX_DESC_OFFSET(0),
492 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
493 1.1 martin BUS_DMASYNC_POSTWRITE);
494 1.1 martin
495 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
496 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
497 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
498 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
499 1.1 martin &ring->t_data[i].td_map);
500 1.1 martin if (error != 0) {
501 1.1 martin aprint_error_dev(sc->sc_dev,
502 1.1 martin "could not create TX DMA map #%d\n", i);
503 1.1 martin ring->t_data[i].td_map = NULL;
504 1.1 martin goto fail;
505 1.1 martin }
506 1.1 martin ring->t_desc[i].ddesc_next = htole32(
507 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
508 1.1 martin *((i+1)&AWGE_TX_RING_COUNT));
509 1.1 martin }
510 1.1 martin
511 1.1 martin return 0;
512 1.1 martin
513 1.1 martin fail:
514 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
515 1.1 martin return error;
516 1.1 martin }
517 1.1 martin
518 1.1 martin static void
519 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
520 1.1 martin {
521 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
522 1.1 martin if (end > start) {
523 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
524 1.1 martin TX_DESC_OFFSET(start),
525 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
526 1.1 martin ops);
527 1.1 martin return;
528 1.1 martin }
529 1.1 martin /* sync from 'start' to end of ring */
530 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
531 1.1 martin TX_DESC_OFFSET(start),
532 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
533 1.1 martin ops);
534 1.1 martin /* sync from start of ring to 'end' */
535 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
536 1.1 martin TX_DESC_OFFSET(0),
537 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
538 1.1 martin ops);
539 1.1 martin }
540 1.1 martin
541 1.1 martin static void
542 1.1 martin dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
543 1.1 martin struct dwc_gmac_tx_ring *ring)
544 1.1 martin {
545 1.1 martin int i;
546 1.1 martin
547 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
548 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
549 1.1 martin
550 1.1 martin if (data->td_m != NULL) {
551 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
552 1.1 martin 0, data->td_active->dm_mapsize,
553 1.1 martin BUS_DMASYNC_POSTWRITE);
554 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
555 1.1 martin m_freem(data->td_m);
556 1.1 martin data->td_m = NULL;
557 1.1 martin }
558 1.1 martin }
559 1.1 martin
560 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
561 1.1 martin TX_DESC_OFFSET(0),
562 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
563 1.1 martin BUS_DMASYNC_PREWRITE);
564 1.1 martin
565 1.1 martin ring->t_queued = 0;
566 1.1 martin ring->t_cur = ring->t_next = 0;
567 1.1 martin }
568 1.1 martin
569 1.1 martin static void
570 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
571 1.1 martin struct dwc_gmac_tx_ring *ring)
572 1.1 martin {
573 1.1 martin int i;
574 1.1 martin
575 1.1 martin /* unload the maps */
576 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
577 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
578 1.1 martin
579 1.1 martin if (data->td_m != NULL) {
580 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
581 1.1 martin 0, data->td_map->dm_mapsize,
582 1.1 martin BUS_DMASYNC_POSTWRITE);
583 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
584 1.1 martin m_freem(data->td_m);
585 1.1 martin data->td_m = NULL;
586 1.1 martin }
587 1.1 martin }
588 1.1 martin
589 1.1 martin /* and actually free them */
590 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
591 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
592 1.1 martin
593 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
594 1.1 martin }
595 1.1 martin }
596 1.1 martin
/*
 * MII link-state change callback.  Intended to reconfigure the MAC
 * for the negotiated speed; the speed-specific handling is still
 * unimplemented (XXX below).
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/* XXX debug leftover -- remove once the driver is complete */
	printf("dwc_gmac_miibus_statchg called\n");

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}
619 1.1 martin
/*
 * if_init callback: (re)initialize the interface.
 *
 * NOTE(review): initialization is still incomplete at this revision --
 * it only stops the interface, reloads the RX descriptor base address
 * and sets the flags; it does not start the MAC/DMA engines.  Confirm
 * against a later revision of this driver.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* Already up and running - nothing to do. */
	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, sc->sc_rxq.r_physaddr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
640 1.1 martin
641 1.1 martin static void
642 1.1 martin dwc_gmac_start(struct ifnet *ifp)
643 1.1 martin {
644 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
645 1.1 martin int old = sc->sc_txq.t_queued;
646 1.1 martin struct mbuf *m0;
647 1.1 martin
648 1.1 martin if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
649 1.1 martin return;
650 1.1 martin
651 1.1 martin for (;;) {
652 1.1 martin IFQ_POLL(&ifp->if_snd, m0);
653 1.1 martin if (m0 == NULL)
654 1.1 martin break;
655 1.1 martin if (dwc_gmac_queue(sc, m0) != 0) {
656 1.1 martin ifp->if_flags |= IFF_OACTIVE;
657 1.1 martin break;
658 1.1 martin }
659 1.1 martin IFQ_DEQUEUE(&ifp->if_snd, m0);
660 1.1 martin bpf_mtap(ifp, m0);
661 1.1 martin }
662 1.1 martin
663 1.1 martin if (sc->sc_txq.t_queued != old) {
664 1.1 martin /* packets have been queued, kick it off */
665 1.1 martin dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
666 1.1 martin BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
667 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
668 1.1 martin sc->sc_txq.t_physaddr
669 1.1 martin + old*sizeof(struct dwc_gmac_dev_dmadesc));
670 1.1 martin }
671 1.1 martin }
672 1.1 martin
/*
 * if_stop callback: bring the link administration down and reset both
 * descriptor rings, dropping any pending packets.
 *
 * NOTE(review): 'disable' is ignored and the MAC/DMA engines are not
 * actually halted here -- confirm this is acceptable at this stage of
 * the driver.
 */
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}
682 1.1 martin
683 1.1 martin /*
684 1.1 martin * Add m0 to the TX ring
685 1.1 martin */
686 1.1 martin static int
687 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
688 1.1 martin {
689 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
690 1.1 martin struct dwc_gmac_tx_data *data = NULL;
691 1.1 martin bus_dmamap_t map;
692 1.1 martin uint32_t status, flags, len;
693 1.1 martin int error, i, first;
694 1.1 martin
695 1.1 martin first = sc->sc_txq.t_cur;
696 1.1 martin map = sc->sc_txq.t_data[first].td_map;
697 1.1 martin flags = 0;
698 1.1 martin
699 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
700 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
701 1.1 martin if (error != 0) {
702 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
703 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
704 1.1 martin return error;
705 1.1 martin }
706 1.1 martin
707 1.1 martin if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
708 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
709 1.1 martin return ENOBUFS;
710 1.1 martin }
711 1.1 martin
712 1.1 martin data = NULL;
713 1.1 martin flags = DDESC_STATUS_TXINT|DDESC_STATUS_TXCHAIN;
714 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
715 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
716 1.1 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
717 1.1 martin
718 1.1 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
719 1.1 martin len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
720 1.1 martin << DDESC_CNTL_SIZE1SHIFT;
721 1.1 martin desc->ddesc_cntl = htole32(len);
722 1.1 martin status = flags;
723 1.1 martin desc->ddesc_status = htole32(status);
724 1.1 martin sc->sc_txq.t_queued++;
725 1.1 martin
726 1.1 martin /*
727 1.1 martin * Defer passing ownership of the first descriptor
728 1.1 martin * untill we are done.
729 1.1 martin */
730 1.1 martin flags |= DDESC_STATUS_OWNEDBYDEV;
731 1.1 martin
732 1.1 martin sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
733 1.1 martin & (AWGE_TX_RING_COUNT-1);
734 1.1 martin }
735 1.1 martin
736 1.1 martin /* Fixup last */
737 1.1 martin status = flags|DDESC_STATUS_TXLAST;
738 1.1 martin desc->ddesc_status = htole32(status);
739 1.1 martin
740 1.1 martin /* Finalize first */
741 1.1 martin status = flags|DDESC_STATUS_TXFIRST;
742 1.1 martin sc->sc_txq.t_desc[first].ddesc_status = htole32(status);
743 1.1 martin
744 1.1 martin data->td_m = m0;
745 1.1 martin data->td_active = map;
746 1.1 martin
747 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
748 1.1 martin BUS_DMASYNC_PREWRITE);
749 1.1 martin
750 1.1 martin return 0;
751 1.1 martin }
752 1.1 martin
/*
 * if_ioctl callback: handle interface configuration requests,
 * delegating everything not handled locally to ether_ioctl().
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
//	struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		/*
		 * NOTE(review): no break here, so SIOCINITIFADDR also
		 * falls through into ether_ioctl() below -- confirm
		 * this is intended and not a missing break.
		 */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		/* Multicast filter reprogramming is not implemented yet. */
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}
790 1.1 martin
/*
 * Interrupt handler.  Currently only acknowledges MII interrupts and
 * logs the raw status registers; RX/TX processing is not implemented
 * yet.  Returns 1 (claims the interrupt) unconditionally.
 */
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		/* Read of the MII status register acknowledges the IRQ. */
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	/* XXX debug scaffolding from here down -- remove before release. */
	printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
	    status, dma_status);

	/* XXX temporary brake: panic after 20 interrupts while debugging. */
	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}
815