dwc_gmac.c revision 1.1 1 1.1 martin /*-
2 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
3 1.1 martin * All rights reserved.
4 1.1 martin *
5 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
6 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
7 1.1 martin *
8 1.1 martin * Redistribution and use in source and binary forms, with or without
9 1.1 martin * modification, are permitted provided that the following conditions
10 1.1 martin * are met:
11 1.1 martin * 1. Redistributions of source code must retain the above copyright
12 1.1 martin * notice, this list of conditions and the following disclaimer.
13 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer in the
15 1.1 martin * documentation and/or other materials provided with the distribution.
16 1.1 martin *
17 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
28 1.1 martin */
29 1.1 martin
30 1.1 martin /*
31 1.1 martin * This driver supports the Synopsis Designware GMAC core, as found
32 1.1 martin * on Allwinner A20 cores and others.
33 1.1 martin *
34 1.1 martin * Real documentation seems to not be available, the marketing product
35 1.1 martin * documents could be found here:
36 1.1 martin *
37 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
38 1.1 martin */
39 1.1 martin
40 1.1 martin #include <sys/cdefs.h>
41 1.1 martin
42 1.1 martin __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.1 2014/09/08 14:24:32 martin Exp $");
43 1.1 martin
44 1.1 martin #include "opt_inet.h"
45 1.1 martin
46 1.1 martin #include <sys/param.h>
47 1.1 martin #include <sys/bus.h>
48 1.1 martin #include <sys/device.h>
49 1.1 martin #include <sys/intr.h>
50 1.1 martin #include <sys/systm.h>
51 1.1 martin #include <sys/sockio.h>
52 1.1 martin
53 1.1 martin #include <net/if.h>
54 1.1 martin #include <net/if_ether.h>
55 1.1 martin #include <net/if_media.h>
56 1.1 martin #include <net/bpf.h>
57 1.1 martin #ifdef INET
58 1.1 martin #include <netinet/if_inarp.h>
59 1.1 martin #endif
60 1.1 martin
61 1.1 martin #include <dev/mii/miivar.h>
62 1.1 martin
63 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
64 1.1 martin #include <dev/ic/dwc_gmac_var.h>
65 1.1 martin
66 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
67 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
68 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
69 1.1 martin
70 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
71 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
72 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
73 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
74 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
75 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
76 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
77 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
78 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
79 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
80 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
81 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
82 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
83 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
84 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
85 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
86 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
87 1.1 martin
88 1.1 martin
/*
 * The RX and TX descriptor rings live back-to-back in a single DMA
 * allocation (see dwc_gmac_alloc_dma_rings): AWGE_RX_RING_COUNT RX
 * descriptors first, then the TX descriptors.  These macros yield the
 * byte offset of descriptor N of the respective ring within that
 * shared bus_dma mapping, for use with bus_dmamap_sync().
 */
#define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
		    *sizeof(struct dwc_gmac_dev_dmadesc))

#define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
93 1.1 martin
94 1.1 martin void
95 1.1 martin dwc_gmac_attach(struct dwc_gmac_softc *sc, uint8_t *ep)
96 1.1 martin {
97 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN];
98 1.1 martin uint32_t maclo, machi;
99 1.1 martin struct mii_data * const mii = &sc->sc_mii;
100 1.1 martin struct ifnet * const ifp = &sc->sc_ec.ec_if;
101 1.1 martin
102 1.1 martin mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
103 1.1 martin
104 1.1 martin /*
105 1.1 martin * If the frontend did not pass in a pre-configured ethernet mac
106 1.1 martin * address, try to read on from the current filter setup,
107 1.1 martin * before resetting the chip.
108 1.1 martin */
109 1.1 martin if (ep == NULL) {
110 1.1 martin maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
111 1.1 martin machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
112 1.1 martin enaddr[0] = maclo & 0x0ff;
113 1.1 martin enaddr[1] = (maclo >> 8) & 0x0ff;
114 1.1 martin enaddr[2] = (maclo >> 16) & 0x0ff;
115 1.1 martin enaddr[3] = (maclo >> 24) & 0x0ff;
116 1.1 martin enaddr[4] = machi & 0x0ff;
117 1.1 martin enaddr[5] = (machi >> 8) & 0x0ff;
118 1.1 martin ep = enaddr;
119 1.1 martin }
120 1.1 martin
121 1.1 martin /*
122 1.1 martin * Init chip and do intial setup
123 1.1 martin */
124 1.1 martin if (dwc_gmac_reset(sc) != 0)
125 1.1 martin return; /* not much to cleanup, haven't attached yet */
126 1.1 martin dwc_gmac_write_hwaddr(sc, ep);
127 1.1 martin aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
128 1.1 martin ether_sprintf(enaddr));
129 1.1 martin
130 1.1 martin /*
131 1.1 martin * Allocate Tx and Rx rings
132 1.1 martin */
133 1.1 martin if (dwc_gmac_alloc_dma_rings(sc) != 0) {
134 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
135 1.1 martin goto fail;
136 1.1 martin }
137 1.1 martin
138 1.1 martin if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
139 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
140 1.1 martin goto fail;
141 1.1 martin }
142 1.1 martin
143 1.1 martin mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
144 1.1 martin if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
145 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
146 1.1 martin goto fail;
147 1.1 martin }
148 1.1 martin
149 1.1 martin /*
150 1.1 martin * Prepare interface data
151 1.1 martin */
152 1.1 martin ifp->if_softc = sc;
153 1.1 martin strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
154 1.1 martin ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
155 1.1 martin ifp->if_ioctl = dwc_gmac_ioctl;
156 1.1 martin ifp->if_start = dwc_gmac_start;
157 1.1 martin ifp->if_init = dwc_gmac_init;
158 1.1 martin ifp->if_stop = dwc_gmac_stop;
159 1.1 martin IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
160 1.1 martin IFQ_SET_READY(&ifp->if_snd);
161 1.1 martin
162 1.1 martin /*
163 1.1 martin * Attach MII subdevices
164 1.1 martin */
165 1.1 martin ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
166 1.1 martin mii->mii_ifp = ifp;
167 1.1 martin mii->mii_readreg = dwc_gmac_miibus_read_reg;
168 1.1 martin mii->mii_writereg = dwc_gmac_miibus_write_reg;
169 1.1 martin mii->mii_statchg = dwc_gmac_miibus_statchg;
170 1.1 martin mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
171 1.1 martin
172 1.1 martin if (LIST_EMPTY(&mii->mii_phys)) {
173 1.1 martin aprint_error_dev(sc->sc_dev, "no PHY found!\n");
174 1.1 martin ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
175 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
176 1.1 martin } else {
177 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
178 1.1 martin }
179 1.1 martin
180 1.1 martin /*
181 1.1 martin * Ready, attach interface
182 1.1 martin */
183 1.1 martin if_attach(ifp);
184 1.1 martin ether_ifattach(ifp, enaddr);
185 1.1 martin
186 1.1 martin /*
187 1.1 martin * Enable interrupts
188 1.1 martin */
189 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
190 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);
191 1.1 martin
192 1.1 martin return;
193 1.1 martin
194 1.1 martin fail:
195 1.1 martin dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
196 1.1 martin dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
197 1.1 martin }
198 1.1 martin
199 1.1 martin
200 1.1 martin
/*
 * Soft-reset the GMAC DMA engine by setting GMAC_BUSMODE_RESET and
 * polling until the hardware clears the bit again (up to ~30ms:
 * 3000 iterations of 10us).
 *
 * Returns 0 on success, EIO if the reset bit never self-clears.
 */
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;

	/* Read-modify-write: assert the self-clearing reset bit. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		/* Hardware clears the bit when the reset has completed. */
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}
217 1.1 martin
218 1.1 martin static void
219 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
220 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
221 1.1 martin {
222 1.1 martin uint32_t lo, hi;
223 1.1 martin
224 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
225 1.1 martin | (enaddr[3] << 24);
226 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
227 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
228 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
229 1.1 martin }
230 1.1 martin
/*
 * MII read callback: read PHY register 'reg' of PHY 'phy' through the
 * GMAC's MDIO interface.  Serialized with writes via sc_mdio_lock.
 *
 * Returns the 16-bit register value, or 0 if the MDIO transaction did
 * not complete within ~10ms (1000 * 10us) — callers cannot tell a
 * timeout apart from a register that reads as zero.
 */
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;
	int rv = 0;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK);

	mutex_enter(&sc->sc_mdio_lock);
	/* Start the transaction; GMAC_MII_BUSY is cleared by hardware
	 * when the read has completed. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr
	    | GMAC_MII_CLK_150_250M | GMAC_MII_BUSY);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
		    & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}
259 1.1 martin
/*
 * MII write callback: write 'val' to PHY register 'reg' of PHY 'phy'
 * through the GMAC's MDIO interface.  Serialized with reads via
 * sc_mdio_lock.  A transaction that does not finish within ~10ms is
 * silently abandoned.
 *
 * NOTE(review): unlike the read path, no GMAC_MII_CLK_* divider is
 * ORed into the address register here — verify against the databook
 * whether the write path needs it too.
 */
static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	/* Data must be in place before the address/command register
	 * kicks off the transaction. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, miiaddr);

	/* Wait for the hardware to clear GMAC_MII_BUSY. */
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR)
		    & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}
284 1.1 martin
285 1.1 martin static int
286 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
287 1.1 martin struct dwc_gmac_rx_ring *ring)
288 1.1 martin {
289 1.1 martin struct dwc_gmac_rx_data *data;
290 1.1 martin bus_addr_t physaddr;
291 1.1 martin const size_t descsize =
292 1.1 martin AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
293 1.1 martin int error, i, next;
294 1.1 martin
295 1.1 martin ring->r_cur = ring->r_next = 0;
296 1.1 martin memset(ring->r_desc, 0, descsize);
297 1.1 martin
298 1.1 martin /*
299 1.1 martin * Pre-allocate Rx buffers and populate Rx ring.
300 1.1 martin */
301 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
302 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
303 1.1 martin
304 1.1 martin data = &sc->sc_rxq.r_data[i];
305 1.1 martin
306 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
307 1.1 martin if (data->rd_m == NULL) {
308 1.1 martin aprint_error_dev(sc->sc_dev,
309 1.1 martin "could not allocate rx mbuf #%d\n", i);
310 1.1 martin error = ENOMEM;
311 1.1 martin goto fail;
312 1.1 martin }
313 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
314 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
315 1.1 martin if (error != 0) {
316 1.1 martin aprint_error_dev(sc->sc_dev,
317 1.1 martin "could not create DMA map\n");
318 1.1 martin data->rd_map = NULL;
319 1.1 martin goto fail;
320 1.1 martin }
321 1.1 martin MCLGET(data->rd_m, M_DONTWAIT);
322 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) {
323 1.1 martin aprint_error_dev(sc->sc_dev,
324 1.1 martin "could not allocate mbuf cluster #%d\n", i);
325 1.1 martin error = ENOMEM;
326 1.1 martin goto fail;
327 1.1 martin }
328 1.1 martin
329 1.1 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
330 1.1 martin mtod(data->rd_m, void *), MCLBYTES, NULL,
331 1.1 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
332 1.1 martin if (error != 0) {
333 1.1 martin aprint_error_dev(sc->sc_dev,
334 1.1 martin "could not load rx buf DMA map #%d", i);
335 1.1 martin goto fail;
336 1.1 martin }
337 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
338 1.1 martin
339 1.1 martin desc = &sc->sc_rxq.r_desc[i];
340 1.1 martin desc->ddesc_data = htole32(physaddr);
341 1.1 martin next = i < (AWGE_RX_RING_COUNT-1) ? i+1 : 0;
342 1.1 martin desc->ddesc_next = htole32(ring->r_physaddr
343 1.1 martin + next * sizeof(*desc));
344 1.1 martin desc->ddesc_cntl = htole32(
345 1.1 martin (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
346 1.1 martin << DDESC_CNTL_SIZE1SHIFT);
347 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
348 1.1 martin }
349 1.1 martin
350 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
351 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
352 1.1 martin BUS_DMASYNC_PREREAD);
353 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
354 1.1 martin htole32(ring->r_physaddr));
355 1.1 martin
356 1.1 martin return 0;
357 1.1 martin
358 1.1 martin fail:
359 1.1 martin dwc_gmac_free_rx_ring(sc, ring);
360 1.1 martin return error;
361 1.1 martin }
362 1.1 martin
363 1.1 martin static void
364 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
365 1.1 martin struct dwc_gmac_rx_ring *ring)
366 1.1 martin {
367 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
368 1.1 martin int i;
369 1.1 martin
370 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
371 1.1 martin desc = &sc->sc_rxq.r_desc[i];
372 1.1 martin desc->ddesc_cntl = htole32(
373 1.1 martin (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
374 1.1 martin << DDESC_CNTL_SIZE1SHIFT);
375 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
376 1.1 martin }
377 1.1 martin
378 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
379 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
380 1.1 martin BUS_DMASYNC_PREWRITE);
381 1.1 martin
382 1.1 martin ring->r_cur = ring->r_next = 0;
383 1.1 martin }
384 1.1 martin
385 1.1 martin static int
386 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
387 1.1 martin {
388 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
389 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
390 1.1 martin int error, nsegs;
391 1.1 martin void *rings;
392 1.1 martin
393 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
394 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
395 1.1 martin if (error != 0) {
396 1.1 martin aprint_error_dev(sc->sc_dev,
397 1.1 martin "could not create desc DMA map\n");
398 1.1 martin sc->sc_dma_ring_map = NULL;
399 1.1 martin goto fail;
400 1.1 martin }
401 1.1 martin
402 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
403 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
404 1.1 martin if (error != 0) {
405 1.1 martin aprint_error_dev(sc->sc_dev,
406 1.1 martin "could not map DMA memory\n");
407 1.1 martin goto fail;
408 1.1 martin }
409 1.1 martin
410 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
411 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
412 1.1 martin if (error != 0) {
413 1.1 martin aprint_error_dev(sc->sc_dev,
414 1.1 martin "could not allocate DMA memory\n");
415 1.1 martin goto fail;
416 1.1 martin }
417 1.1 martin
418 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
419 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
420 1.1 martin if (error != 0) {
421 1.1 martin aprint_error_dev(sc->sc_dev,
422 1.1 martin "could not load desc DMA map\n");
423 1.1 martin goto fail;
424 1.1 martin }
425 1.1 martin
426 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
427 1.1 martin sc->sc_rxq.r_desc = rings;
428 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
429 1.1 martin
430 1.1 martin /* and next rings to the TX side */
431 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
432 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
433 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
434 1.1 martin
435 1.1 martin return 0;
436 1.1 martin
437 1.1 martin fail:
438 1.1 martin dwc_gmac_free_dma_rings(sc);
439 1.1 martin return error;
440 1.1 martin }
441 1.1 martin
442 1.1 martin static void
443 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
444 1.1 martin {
445 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
446 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
447 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
448 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
449 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
450 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
451 1.1 martin }
452 1.1 martin
453 1.1 martin static void
454 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
455 1.1 martin {
456 1.1 martin struct dwc_gmac_rx_data *data;
457 1.1 martin int i;
458 1.1 martin
459 1.1 martin if (ring->r_desc == NULL)
460 1.1 martin return;
461 1.1 martin
462 1.1 martin
463 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
464 1.1 martin data = &ring->r_data[i];
465 1.1 martin
466 1.1 martin if (data->rd_map != NULL) {
467 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
468 1.1 martin AWGE_RX_RING_COUNT
469 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
470 1.1 martin BUS_DMASYNC_POSTREAD);
471 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
472 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
473 1.1 martin }
474 1.1 martin if (data->rd_m != NULL)
475 1.1 martin m_freem(data->rd_m);
476 1.1 martin }
477 1.1 martin }
478 1.1 martin
479 1.1 martin static int
480 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
481 1.1 martin struct dwc_gmac_tx_ring *ring)
482 1.1 martin {
483 1.1 martin int i, error = 0;
484 1.1 martin
485 1.1 martin ring->t_queued = 0;
486 1.1 martin ring->t_cur = ring->t_next = 0;
487 1.1 martin
488 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
489 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
490 1.1 martin TX_DESC_OFFSET(0),
491 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
492 1.1 martin BUS_DMASYNC_POSTWRITE);
493 1.1 martin
494 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
495 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
496 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
497 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
498 1.1 martin &ring->t_data[i].td_map);
499 1.1 martin if (error != 0) {
500 1.1 martin aprint_error_dev(sc->sc_dev,
501 1.1 martin "could not create TX DMA map #%d\n", i);
502 1.1 martin ring->t_data[i].td_map = NULL;
503 1.1 martin goto fail;
504 1.1 martin }
505 1.1 martin ring->t_desc[i].ddesc_next = htole32(
506 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
507 1.1 martin *((i+1)&AWGE_TX_RING_COUNT));
508 1.1 martin }
509 1.1 martin
510 1.1 martin return 0;
511 1.1 martin
512 1.1 martin fail:
513 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
514 1.1 martin return error;
515 1.1 martin }
516 1.1 martin
517 1.1 martin static void
518 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
519 1.1 martin {
520 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
521 1.1 martin if (end > start) {
522 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
523 1.1 martin TX_DESC_OFFSET(start),
524 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
525 1.1 martin ops);
526 1.1 martin return;
527 1.1 martin }
528 1.1 martin /* sync from 'start' to end of ring */
529 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
530 1.1 martin TX_DESC_OFFSET(start),
531 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
532 1.1 martin ops);
533 1.1 martin /* sync from start of ring to 'end' */
534 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
535 1.1 martin TX_DESC_OFFSET(0),
536 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
537 1.1 martin ops);
538 1.1 martin }
539 1.1 martin
/*
 * Abort all pending transmissions: unload and free every mbuf still
 * attached to the ring, flush the (zeroed-on-alloc) descriptors back
 * to the device and rewind the software ring state.
 */
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/* td_active is the map the packet was loaded
			 * into (set by dwc_gmac_queue). */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* Push the descriptor area out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}
567 1.1 martin
568 1.1 martin static void
569 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
570 1.1 martin struct dwc_gmac_tx_ring *ring)
571 1.1 martin {
572 1.1 martin int i;
573 1.1 martin
574 1.1 martin /* unload the maps */
575 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
576 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
577 1.1 martin
578 1.1 martin if (data->td_m != NULL) {
579 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
580 1.1 martin 0, data->td_map->dm_mapsize,
581 1.1 martin BUS_DMASYNC_POSTWRITE);
582 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
583 1.1 martin m_freem(data->td_m);
584 1.1 martin data->td_m = NULL;
585 1.1 martin }
586 1.1 martin }
587 1.1 martin
588 1.1 martin /* and actually free them */
589 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
590 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
591 1.1 martin
592 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
593 1.1 martin }
594 1.1 martin }
595 1.1 martin
/*
 * MII status-change callback, invoked when the PHY's negotiated media
 * changes.  Currently a stub: it only dispatches on the negotiated
 * speed, with the actual MAC reconfiguration still unimplemented
 * (the XXX branches).  The printf is debug scaffolding.
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	printf("dwc_gmac_miibus_statchg called\n");

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}
618 1.1 martin
/*
 * if_init handler: (re)initialize the interface.  A no-op if already
 * running; otherwise stops the chip, points the DMA engine at the RX
 * ring and marks the interface running.
 *
 * NOTE(review): this is visibly incomplete for a first revision — it
 * never programs the TX descriptor base address nor starts the
 * MAC/DMA RX and TX engines; confirm against later revisions of the
 * driver.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, sc->sc_rxq.r_physaddr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
639 1.1 martin
/*
 * if_start handler: drain the interface send queue into the TX ring
 * via dwc_gmac_queue(), then sync the newly written descriptors and
 * point the DMA engine at them.  Sets IFF_OACTIVE when the ring
 * fills.
 *
 * NOTE(review): 'old' is t_queued — a count of queued descriptors —
 * but it is then used as a ring *index* both for dwc_gmac_txdesc_sync()
 * and for the descriptor address written to AWIN_GMAC_DMA_TX_ADDR.
 * The two only coincide while the ring has never wrapped and nothing
 * has been reclaimed; verify against later revisions (they likely use
 * t_cur/t_next here).
 */
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* Ring full: leave the packet on the queue and stall. */
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
		    sc->sc_txq.t_physaddr
		    + old*sizeof(struct dwc_gmac_dev_dmadesc));
	}
}
671 1.1 martin
672 1.1 martin static void
673 1.1 martin dwc_gmac_stop(struct ifnet *ifp, int disable)
674 1.1 martin {
675 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
676 1.1 martin
677 1.1 martin mii_down(&sc->sc_mii);
678 1.1 martin dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
679 1.1 martin dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
680 1.1 martin }
681 1.1 martin
682 1.1 martin /*
683 1.1 martin * Add m0 to the TX ring
684 1.1 martin */
685 1.1 martin static int
686 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
687 1.1 martin {
688 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
689 1.1 martin struct dwc_gmac_tx_data *data = NULL;
690 1.1 martin bus_dmamap_t map;
691 1.1 martin uint32_t status, flags, len;
692 1.1 martin int error, i, first;
693 1.1 martin
694 1.1 martin first = sc->sc_txq.t_cur;
695 1.1 martin map = sc->sc_txq.t_data[first].td_map;
696 1.1 martin flags = 0;
697 1.1 martin
698 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
699 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
700 1.1 martin if (error != 0) {
701 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
702 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
703 1.1 martin return error;
704 1.1 martin }
705 1.1 martin
706 1.1 martin if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
707 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
708 1.1 martin return ENOBUFS;
709 1.1 martin }
710 1.1 martin
711 1.1 martin data = NULL;
712 1.1 martin flags = DDESC_STATUS_TXINT|DDESC_STATUS_TXCHAIN;
713 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
714 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
715 1.1 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
716 1.1 martin
717 1.1 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
718 1.1 martin len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
719 1.1 martin << DDESC_CNTL_SIZE1SHIFT;
720 1.1 martin desc->ddesc_cntl = htole32(len);
721 1.1 martin status = flags;
722 1.1 martin desc->ddesc_status = htole32(status);
723 1.1 martin sc->sc_txq.t_queued++;
724 1.1 martin
725 1.1 martin /*
726 1.1 martin * Defer passing ownership of the first descriptor
727 1.1 martin * untill we are done.
728 1.1 martin */
729 1.1 martin flags |= DDESC_STATUS_OWNEDBYDEV;
730 1.1 martin
731 1.1 martin sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
732 1.1 martin & (AWGE_TX_RING_COUNT-1);
733 1.1 martin }
734 1.1 martin
735 1.1 martin /* Fixup last */
736 1.1 martin status = flags|DDESC_STATUS_TXLAST;
737 1.1 martin desc->ddesc_status = htole32(status);
738 1.1 martin
739 1.1 martin /* Finalize first */
740 1.1 martin status = flags|DDESC_STATUS_TXFIRST;
741 1.1 martin sc->sc_txq.t_desc[first].ddesc_status = htole32(status);
742 1.1 martin
743 1.1 martin data->td_m = m0;
744 1.1 martin data->td_active = map;
745 1.1 martin
746 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
747 1.1 martin BUS_DMASYNC_PREWRITE);
748 1.1 martin
749 1.1 martin return 0;
750 1.1 martin }
751 1.1 martin
752 1.1 martin static int
753 1.1 martin dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
754 1.1 martin {
755 1.1 martin // struct dwc_gmac_softc *sc = ifp->if_softc;
756 1.1 martin struct ifaddr *ifa = (struct ifaddr *)data;
757 1.1 martin int s, error = 0;
758 1.1 martin
759 1.1 martin s = splnet();
760 1.1 martin
761 1.1 martin switch (cmd) {
762 1.1 martin case SIOCINITIFADDR:
763 1.1 martin ifp->if_flags |= IFF_UP;
764 1.1 martin dwc_gmac_init(ifp);
765 1.1 martin switch (ifa->ifa_addr->sa_family) {
766 1.1 martin #ifdef INET
767 1.1 martin case AF_INET:
768 1.1 martin arp_ifinit(ifp, ifa);
769 1.1 martin break;
770 1.1 martin #endif
771 1.1 martin default:
772 1.1 martin break;
773 1.1 martin }
774 1.1 martin default:
775 1.1 martin if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
776 1.1 martin break;
777 1.1 martin error = 0;
778 1.1 martin if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
779 1.1 martin ;
780 1.1 martin else if (ifp->if_flags & IFF_RUNNING)
781 1.1 martin /* setmulti */;
782 1.1 martin break;
783 1.1 martin }
784 1.1 martin
785 1.1 martin splx(s);
786 1.1 martin
787 1.1 martin return error;
788 1.1 martin }
789 1.1 martin
790 1.1 martin int
791 1.1 martin dwc_gmac_intr(struct dwc_gmac_softc *sc)
792 1.1 martin {
793 1.1 martin uint32_t status, dma_status;
794 1.1 martin
795 1.1 martin status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
796 1.1 martin if (status & AWIN_GMAC_MII_IRQ)
797 1.1 martin (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
798 1.1 martin AWIN_GMAC_MII_STATUS);
799 1.1 martin
800 1.1 martin dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
801 1.1 martin AWIN_GMAC_DMA_STATUS);
802 1.1 martin
803 1.1 martin printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
804 1.1 martin status, dma_status);
805 1.1 martin
806 1.1 martin static size_t cnt = 0;
807 1.1 martin if (++cnt > 20)
808 1.1 martin panic("enough now");
809 1.1 martin
810 1.1 martin return 1;
811 1.1 martin }
812