/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product pages can be found here:
 *
 *   http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.9 2014/10/13 08:24:52 martin Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);

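/*
 * All descriptors live in one DMA allocation: the AWGE_RX_RING_COUNT RX
 * descriptors come first, followed by the TX descriptors, hence the RX
 * ring count offset in TX_DESC_OFFSET().  The *_NEXT() macros wrap the
 * ring index and rely on the ring counts being powers of two.
 */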
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_ETE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE|GMAC_DMA_INT_TUE)

#define AWIN_DEF_MAC_INTRMASK \
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
#endif

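/*
 * Attach the core: pick up the MAC address (from a "mac-address" device
 * property if present, otherwise from the current address filter
 * registers), reset the chip, allocate the descriptor rings, hook up
 * MII and the network interface, and finally enable MAC and DMA
 * interrupts.
 */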
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}


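/*
 * Soft-reset the core via the DMA bus mode register and poll (up to
 * roughly 30ms) until the hardware clears the reset bit again.
 */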
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

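/*
 * MDIO access: the PHY address, register number and MDC clock divider
 * are packed into the MII address register together with the BUSY bit
 * (plus WRITE for write accesses); the BUSY bit is then polled until
 * the hardware has completed the transfer.  sc_mdio_lock serializes
 * concurrent accesses to the MDIO registers.
 */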
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

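/*
 * Allocate one mbuf cluster and a DMA map per RX descriptor, link the
 * descriptors into a chain via ddesc_next, hand each of them to the
 * device (DDESC_STATUS_OWNEDBYDEV), and finally point the DMA engine
 * at the start of the ring.
 */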
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
}

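/*
 * Allocate one contiguous, coherent chunk of descriptor memory and
 * split it between the RX ring (first AWGE_RX_RING_COUNT descriptors)
 * and the TX ring (the remainder), matching the layout assumed by
 * RX_DESC_OFFSET()/TX_DESC_OFFSET().
 */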
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

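/*
 * Sync the TX descriptors in [start, end).  When the range wraps
 * around the end of the ring it is synced in two pieces.
 */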
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST | AWIN_GMAC_MAC_CONF_TXENABLE
	    | AWIN_GMAC_MAC_CONF_RXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100;
		break;
	case IFM_1000_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up the DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

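/*
 * if_start: dequeue packets from the interface send queue and add them
 * to the TX ring until it fills up (setting IFF_OACTIVE in that case);
 * if anything was queued, sync the descriptors and kick the transmit
 * DMA engine.
 */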
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring: DMA-map the mbuf chain, fill in one descriptor
 * per segment, and only hand ownership of the first descriptor to the
 * device once the whole chain has been set up.
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	// struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}

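/*
 * Reclaim completed TX descriptors: walk the ring from t_next, stop at
 * the first descriptor still owned by the device, free the associated
 * mbufs and unload their DMA maps, and clear IFF_OACTIVE once there is
 * room in the ring again.
 */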
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);
		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev, "rx intr\n");
	/* XXX */
#endif
}

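/*
 * Interrupt handler: poll the PHY on MII interrupts, dispatch TX and RX
 * DMA interrupts to their handlers, count error interrupts and
 * acknowledge the DMA status bits.  Returns non-zero if the interrupt
 * was ours.
 */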
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr + i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NIE\0"
	    "b\x0f""AIE\0"
	    "b\x0e""ERE\0"
	    "b\x0d""FBE\0"
	    "b\x0a""ETE\0"
	    "b\x09""RWE\0"
	    "b\x08""RSE\0"
	    "b\x07""RUE\0"
	    "b\x06""RIE\0"
	    "b\x05""UNE\0"
	    "b\x04""OVE\0"
	    "b\x03""TJE\0"
	    "b\x02""TUE\0"
	    "b\x01""TSE\0"
	    "b\x00""TIE\0"
	    "\0", dma_status);
	printf("%s: INTR status: %08x, DMA status: %s\n",
	    device_xname(sc->sc_dev),
	    status, buf);

	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);

	panic("%s", msg);
}
#endif