dwc_gmac.c revision 1.5 1 1.1 martin /*-
2 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
3 1.1 martin * All rights reserved.
4 1.1 martin *
5 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
6 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
7 1.1 martin *
8 1.1 martin * Redistribution and use in source and binary forms, with or without
9 1.1 martin * modification, are permitted provided that the following conditions
10 1.1 martin * are met:
11 1.1 martin * 1. Redistributions of source code must retain the above copyright
12 1.1 martin * notice, this list of conditions and the following disclaimer.
13 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer in the
15 1.1 martin * documentation and/or other materials provided with the distribution.
16 1.1 martin *
17 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
28 1.1 martin */
29 1.1 martin
30 1.1 martin /*
 * This driver supports the Synopsys DesignWare GMAC core, as found
32 1.1 martin * on Allwinner A20 cores and others.
33 1.1 martin *
34 1.1 martin * Real documentation seems to not be available, the marketing product
35 1.1 martin * documents could be found here:
36 1.1 martin *
37 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
38 1.1 martin */
39 1.1 martin
40 1.1 martin #include <sys/cdefs.h>
41 1.1 martin
42 1.5 martin __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.5 2014/09/11 06:56:05 martin Exp $");
43 1.1 martin
44 1.1 martin #include "opt_inet.h"
45 1.1 martin
46 1.1 martin #include <sys/param.h>
47 1.1 martin #include <sys/bus.h>
48 1.1 martin #include <sys/device.h>
49 1.1 martin #include <sys/intr.h>
50 1.1 martin #include <sys/systm.h>
51 1.1 martin #include <sys/sockio.h>
52 1.1 martin
53 1.1 martin #include <net/if.h>
54 1.1 martin #include <net/if_ether.h>
55 1.1 martin #include <net/if_media.h>
56 1.1 martin #include <net/bpf.h>
57 1.1 martin #ifdef INET
58 1.1 martin #include <netinet/if_inarp.h>
59 1.1 martin #endif
60 1.1 martin
61 1.1 martin #include <dev/mii/miivar.h>
62 1.1 martin
63 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
64 1.1 martin #include <dev/ic/dwc_gmac_var.h>
65 1.1 martin
66 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
67 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
68 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
69 1.1 martin
70 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
71 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
72 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
73 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
74 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
75 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
76 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
77 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
78 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
79 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
80 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
81 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
82 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
83 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
84 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
85 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
86 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
87 1.1 martin
88 1.1 martin
89 1.1 martin #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
90 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc))
91 1.1 martin
92 1.1 martin #define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
93 1.1 martin
/*
 * Attach the GMAC core: determine the MAC address, reset the chip,
 * allocate the DMA descriptor rings, hook up the MII/PHY layer and
 * attach the network interface.  Called by the bus front-end after
 * sc_dev, sc_bst/sc_bsh and sc_dmat have been initialized.
 *
 * mii_clk selects the MDIO clock divider (only the low 3 bits are used).
 */
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	/* Only the low three bits select the MDIO clock divider. */
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		/* ADDR0LO holds bytes 0-3, ADDR0HI bytes 4-5, LSB first. */
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	/* The reset cleared the filter, so write the address back. */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* Fall back to a fixed "manual" media so ifconfig still works. */
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
210 1.1 martin
211 1.1 martin
212 1.1 martin
213 1.1 martin static int
214 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
215 1.1 martin {
216 1.1 martin size_t cnt;
217 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
218 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
219 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
220 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
221 1.1 martin & GMAC_BUSMODE_RESET) == 0)
222 1.1 martin return 0;
223 1.1 martin delay(10);
224 1.1 martin }
225 1.1 martin
226 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
227 1.1 martin return EIO;
228 1.1 martin }
229 1.1 martin
230 1.1 martin static void
231 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
232 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
233 1.1 martin {
234 1.1 martin uint32_t lo, hi;
235 1.1 martin
236 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
237 1.1 martin | (enaddr[3] << 24);
238 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
239 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
240 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
241 1.1 martin }
242 1.1 martin
/*
 * MII read callback: read PHY register 'reg' of PHY 'phy' over MDIO.
 *
 * Serialized by sc_mdio_lock.  The access is started by writing the
 * address register with GMAC_MII_BUSY set; the data register is valid
 * once the busy bit clears.
 *
 * NOTE(review): returns 0 on timeout, which is indistinguishable from
 * a register that legitimately reads as 0 — callers cannot detect a
 * failed MDIO transaction.
 */
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;
	int rv = 0;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK);

	mutex_enter(&sc->sc_mdio_lock);
	/* Kick off the read; sc_mii_clk selects the MDIO clock divider. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR,
	    miiaddr | GMAC_MII_BUSY | (sc->sc_mii_clk << 2));

	/* Poll up to 1000 times, 10us apart, for the busy bit to clear. */
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}
272 1.1 martin
/*
 * MII write callback: write 'val' to PHY register 'reg' of PHY 'phy'.
 *
 * Serialized by sc_mdio_lock.  The data register must be written
 * before the address register triggers the transaction.  The final
 * busy-bit poll result is not checked; a timed-out write fails
 * silently.
 */
static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t miiaddr;
	size_t cnt;

	miiaddr = ((phy << GMAC_MII_PHY_SHIFT) & GMAC_MII_PHY_MASK)
	    | ((reg << GMAC_MII_REG_SHIFT) & GMAC_MII_REG_MASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE | (sc->sc_mii_clk << 2);

	mutex_enter(&sc->sc_mdio_lock);
	/* Data first, then the address register starts the cycle. */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR,
	    miiaddr);

	/* Wait for the transaction to complete (up to 1000 x 10us). */
	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}
298 1.1 martin
/*
 * Populate the RX ring: allocate an mbuf cluster plus a DMA map for
 * every slot, point each descriptor at its buffer, chain the
 * descriptors circularly and hand them all to the device.
 *
 * Returns 0 on success or an errno; on failure everything allocated
 * so far is torn down via dwc_gmac_free_rx_ring().
 */
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize =
	    AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			/* NULL marks the slot unused for the cleanup path. */
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		/* Chain circularly: the last descriptor points back to 0. */
		next = i < (AWGE_RX_RING_COUNT-1) ? i+1 : 0;
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT);
		/* Give the descriptor to the device. */
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	/*
	 * NOTE(review): the descriptor base address is passed through
	 * htole32() before bus_space_write_4(), which normally handles
	 * byte order itself — verify this isn't a double swap on
	 * big-endian hosts.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    htole32(ring->r_physaddr));

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}
376 1.1 martin
377 1.1 martin static void
378 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
379 1.1 martin struct dwc_gmac_rx_ring *ring)
380 1.1 martin {
381 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
382 1.1 martin int i;
383 1.1 martin
384 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
385 1.1 martin desc = &sc->sc_rxq.r_desc[i];
386 1.1 martin desc->ddesc_cntl = htole32(
387 1.1 martin (AWGE_MAX_PACKET & DDESC_CNTL_SIZE1MASK)
388 1.1 martin << DDESC_CNTL_SIZE1SHIFT);
389 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
390 1.1 martin }
391 1.1 martin
392 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
393 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
394 1.1 martin BUS_DMASYNC_PREWRITE);
395 1.1 martin
396 1.1 martin ring->r_cur = ring->r_next = 0;
397 1.1 martin }
398 1.1 martin
399 1.1 martin static int
400 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
401 1.1 martin {
402 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
403 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
404 1.1 martin int error, nsegs;
405 1.1 martin void *rings;
406 1.1 martin
407 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
408 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
409 1.1 martin if (error != 0) {
410 1.1 martin aprint_error_dev(sc->sc_dev,
411 1.1 martin "could not create desc DMA map\n");
412 1.1 martin sc->sc_dma_ring_map = NULL;
413 1.1 martin goto fail;
414 1.1 martin }
415 1.1 martin
416 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
417 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
418 1.1 martin if (error != 0) {
419 1.1 martin aprint_error_dev(sc->sc_dev,
420 1.1 martin "could not map DMA memory\n");
421 1.1 martin goto fail;
422 1.1 martin }
423 1.1 martin
424 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
425 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
426 1.1 martin if (error != 0) {
427 1.1 martin aprint_error_dev(sc->sc_dev,
428 1.1 martin "could not allocate DMA memory\n");
429 1.1 martin goto fail;
430 1.1 martin }
431 1.1 martin
432 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
433 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
434 1.1 martin if (error != 0) {
435 1.1 martin aprint_error_dev(sc->sc_dev,
436 1.1 martin "could not load desc DMA map\n");
437 1.1 martin goto fail;
438 1.1 martin }
439 1.1 martin
440 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
441 1.1 martin sc->sc_rxq.r_desc = rings;
442 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
443 1.1 martin
444 1.1 martin /* and next rings to the TX side */
445 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
446 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
447 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
448 1.1 martin
449 1.1 martin return 0;
450 1.1 martin
451 1.1 martin fail:
452 1.1 martin dwc_gmac_free_dma_rings(sc);
453 1.1 martin return error;
454 1.1 martin }
455 1.1 martin
456 1.1 martin static void
457 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
458 1.1 martin {
459 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
460 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
461 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
462 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
463 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
464 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
465 1.1 martin }
466 1.1 martin
467 1.1 martin static void
468 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
469 1.1 martin {
470 1.1 martin struct dwc_gmac_rx_data *data;
471 1.1 martin int i;
472 1.1 martin
473 1.1 martin if (ring->r_desc == NULL)
474 1.1 martin return;
475 1.1 martin
476 1.1 martin
477 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
478 1.1 martin data = &ring->r_data[i];
479 1.1 martin
480 1.1 martin if (data->rd_map != NULL) {
481 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
482 1.1 martin AWGE_RX_RING_COUNT
483 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
484 1.1 martin BUS_DMASYNC_POSTREAD);
485 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
486 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
487 1.1 martin }
488 1.1 martin if (data->rd_m != NULL)
489 1.1 martin m_freem(data->rd_m);
490 1.1 martin }
491 1.1 martin }
492 1.1 martin
493 1.1 martin static int
494 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
495 1.1 martin struct dwc_gmac_tx_ring *ring)
496 1.1 martin {
497 1.1 martin int i, error = 0;
498 1.1 martin
499 1.1 martin ring->t_queued = 0;
500 1.1 martin ring->t_cur = ring->t_next = 0;
501 1.1 martin
502 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
503 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
504 1.1 martin TX_DESC_OFFSET(0),
505 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
506 1.1 martin BUS_DMASYNC_POSTWRITE);
507 1.1 martin
508 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
509 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
510 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
511 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
512 1.1 martin &ring->t_data[i].td_map);
513 1.1 martin if (error != 0) {
514 1.1 martin aprint_error_dev(sc->sc_dev,
515 1.1 martin "could not create TX DMA map #%d\n", i);
516 1.1 martin ring->t_data[i].td_map = NULL;
517 1.1 martin goto fail;
518 1.1 martin }
519 1.1 martin ring->t_desc[i].ddesc_next = htole32(
520 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
521 1.1 martin *((i+1)&AWGE_TX_RING_COUNT));
522 1.1 martin }
523 1.1 martin
524 1.1 martin return 0;
525 1.1 martin
526 1.1 martin fail:
527 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
528 1.1 martin return error;
529 1.1 martin }
530 1.1 martin
531 1.1 martin static void
532 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
533 1.1 martin {
534 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
535 1.1 martin if (end > start) {
536 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
537 1.1 martin TX_DESC_OFFSET(start),
538 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
539 1.1 martin ops);
540 1.1 martin return;
541 1.1 martin }
542 1.1 martin /* sync from 'start' to end of ring */
543 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
544 1.1 martin TX_DESC_OFFSET(start),
545 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
546 1.1 martin ops);
547 1.1 martin /* sync from start of ring to 'end' */
548 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
549 1.1 martin TX_DESC_OFFSET(0),
550 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
551 1.1 martin ops);
552 1.1 martin }
553 1.1 martin
/*
 * Reinitialize the TX ring: unload and free any packets still queued,
 * flush the (already-cleared) descriptor area and rewind the software
 * ring state.  The per-slot DMA maps themselves are kept for reuse.
 */
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/* td_active is the map the packet was loaded into. */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}
581 1.1 martin
582 1.1 martin static void
583 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
584 1.1 martin struct dwc_gmac_tx_ring *ring)
585 1.1 martin {
586 1.1 martin int i;
587 1.1 martin
588 1.1 martin /* unload the maps */
589 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
590 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
591 1.1 martin
592 1.1 martin if (data->td_m != NULL) {
593 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
594 1.1 martin 0, data->td_map->dm_mapsize,
595 1.1 martin BUS_DMASYNC_POSTWRITE);
596 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
597 1.1 martin m_freem(data->td_m);
598 1.1 martin data->td_m = NULL;
599 1.1 martin }
600 1.1 martin }
601 1.1 martin
602 1.1 martin /* and actually free them */
603 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
604 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
605 1.1 martin
606 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
607 1.1 martin }
608 1.1 martin }
609 1.1 martin
/*
 * MII status-change callback.  Still a stub: the speed-dependent
 * MAC reconfiguration (MII vs GMII interface selection) has not been
 * implemented yet — see the XXX markers below.
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}
630 1.1 martin
/*
 * if_init callback: bring the interface up.
 *
 * Currently only programs the RX descriptor base address and marks
 * the interface RUNNING; TX/RX engine start and MAC configuration are
 * not done here yet (driver is work in progress — cf. the debug
 * scaffolding in dwc_gmac_intr()).  Returns 0 always.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* Nothing to do if we are already up. */
	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR, sc->sc_rxq.r_physaddr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
651 1.1 martin
/*
 * if_start callback: drain the interface send queue into the TX ring
 * and point the DMA engine at the newly queued descriptors.
 */
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	/* Snapshot of t_queued, used below to detect whether we queued. */
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			/* Ring full (or map failure): stall the queue. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/*
		 * NOTE(review): 'old' is a queue-depth snapshot, not a
		 * ring index, yet it is used both as the sync start
		 * index and to compute the descriptor address written
		 * to TX_ADDR below.  These only coincide while the
		 * ring never wraps — verify against t_cur/t_next
		 * handling.
		 */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
		    sc->sc_txq.t_physaddr
		    + old*sizeof(struct dwc_gmac_dev_dmadesc));
	}
}
683 1.1 martin
/*
 * if_stop callback: take the PHY down and reinitialize both rings,
 * dropping any packets still queued for transmission.  The 'disable'
 * argument is currently unused; the MAC/DMA engines are not
 * explicitly halted here.
 */
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}
693 1.1 martin
694 1.1 martin /*
695 1.1 martin * Add m0 to the TX ring
696 1.1 martin */
/*
 * Add m0 to the TX ring.
 *
 * Loads the mbuf chain into the DMA map of the first free slot, fills
 * one descriptor per DMA segment, and hands the descriptors to the
 * device — the first descriptor last, so the engine never sees a
 * partially built chain.  Returns 0 on success, ENOBUFS if the ring
 * lacks room, or the bus_dmamap_load_mbuf() error.  The caller keeps
 * ownership of m0 on failure.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t status, flags, len;
	int error, i, first;

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	/* Make sure all segments fit; keep one descriptor spare. */
	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_STATUS_TXINT|DDESC_STATUS_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = (map->dm_segs[i].ds_len & DDESC_CNTL_SIZE1MASK)
		    << DDESC_CNTL_SIZE1SHIFT;
		desc->ddesc_cntl = htole32(len);
		status = flags;
		desc->ddesc_status = htole32(status);
		sc->sc_txq.t_queued++;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		flags |= DDESC_STATUS_OWNEDBYDEV;

		/* AWGE_TX_RING_COUNT is a power of two; mask to wrap. */
		sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
		    & (AWGE_TX_RING_COUNT-1);
	}

	/* Fixup last */
	status = flags|DDESC_STATUS_TXLAST;
	desc->ddesc_status = htole32(status);

	/* Finalize first: only now does the device own descriptor chain */
	status = flags|DDESC_STATUS_TXFIRST;
	sc->sc_txq.t_desc[first].ddesc_status = htole32(status);

	/* Remember the mbuf and map in the slot of the LAST segment. */
	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}
763 1.1 martin
/*
 * if_ioctl callback.  SIOCINITIFADDR brings the interface up and
 * (for INET) initializes ARP; everything else is delegated to
 * ether_ioctl().  Multicast filter reprogramming on
 * SIOCADDMULTI/SIOCDELMULTI is not implemented yet (see the empty
 * "setmulti" branch).
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
//	struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		/*
		 * FALLTHROUGH into default so ether_ioctl() also sees
		 * SIOCINITIFADDR — NOTE(review): there is no break
		 * here; confirm the fallthrough is intentional.
		 */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		/* ENETRESET: a reinit would be needed for multicast changes. */
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}
801 1.1 martin
/*
 * Interrupt handler, called by the bus front-end.  Acknowledges MII
 * link interrupts and polls the PHY; everything else is still debug
 * scaffolding — the status registers are printed and the machine is
 * deliberately panicked after 20 interrupts (development leftover,
 * must be removed before production use).
 *
 * Returns 1 (interrupt claimed) unconditionally.
 */
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		/* Read MII status — presumably clears the IRQ; verify. */
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	/* XXX debug output — remove once RX/TX handling is implemented. */
	printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
	    status, dma_status);

	/* XXX debug brake: stop the machine after 20 interrupts. */
	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}
826