dwc_gmac.c revision 1.7 1 1.1 martin /*-
2 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
3 1.1 martin * All rights reserved.
4 1.1 martin *
5 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
6 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
7 1.1 martin *
8 1.1 martin * Redistribution and use in source and binary forms, with or without
9 1.1 martin * modification, are permitted provided that the following conditions
10 1.1 martin * are met:
11 1.1 martin * 1. Redistributions of source code must retain the above copyright
12 1.1 martin * notice, this list of conditions and the following disclaimer.
13 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer in the
15 1.1 martin * documentation and/or other materials provided with the distribution.
16 1.1 martin *
17 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
28 1.1 martin */
29 1.1 martin
30 1.1 martin /*
 * This driver supports the Synopsys DesignWare GMAC core, as found
32 1.1 martin * on Allwinner A20 cores and others.
33 1.1 martin *
34 1.1 martin * Real documentation seems to not be available, the marketing product
35 1.1 martin * documents could be found here:
36 1.1 martin *
37 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
38 1.1 martin */
39 1.1 martin
40 1.1 martin #include <sys/cdefs.h>
41 1.1 martin
42 1.7 martin __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.7 2014/09/14 18:28:37 martin Exp $");
43 1.7 martin
44 1.7 martin /* #define DWC_GMAC_DEBUG 1 */
45 1.1 martin
46 1.1 martin #include "opt_inet.h"
47 1.1 martin
48 1.1 martin #include <sys/param.h>
49 1.1 martin #include <sys/bus.h>
50 1.1 martin #include <sys/device.h>
51 1.1 martin #include <sys/intr.h>
52 1.1 martin #include <sys/systm.h>
53 1.1 martin #include <sys/sockio.h>
54 1.1 martin
55 1.1 martin #include <net/if.h>
56 1.1 martin #include <net/if_ether.h>
57 1.1 martin #include <net/if_media.h>
58 1.1 martin #include <net/bpf.h>
59 1.1 martin #ifdef INET
60 1.1 martin #include <netinet/if_inarp.h>
61 1.1 martin #endif
62 1.1 martin
63 1.1 martin #include <dev/mii/miivar.h>
64 1.1 martin
65 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
66 1.1 martin #include <dev/ic/dwc_gmac_var.h>
67 1.1 martin
68 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
69 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
70 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
71 1.1 martin
72 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
73 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
74 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
75 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
76 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
77 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
78 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
79 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
80 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
81 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
82 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
83 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
84 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
85 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
86 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
87 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
88 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
89 1.1 martin
90 1.1 martin
91 1.1 martin #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
92 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc))
93 1.1 martin
94 1.1 martin #define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
95 1.1 martin
96 1.7 martin
97 1.7 martin #ifdef DWC_GMAC_DEBUG
98 1.7 martin static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
99 1.7 martin static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
100 1.7 martin #endif
101 1.7 martin
/*
 * Attach the GMAC core: determine the MAC address, reset the chip,
 * set up the DMA descriptor rings, attach the MII layer and register
 * the network interface.
 *
 * The bus front-end must have initialized sc_dev, sc_bst/sc_bsh and
 * sc_dmat before calling; mii_clk selects the MDIO clock setting
 * (only the low 3 bits are used).
 */
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;	/* only the low 3 bits are used */

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		/* the two filter registers hold the address byte-wise, LSB first */
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

#ifdef DWC_GMAC_DEBUG
	/* dump the pre-reset DMA register state (debug aid only) */
	dwc_gmac_dump_dma(sc);
#endif

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return; /* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* no PHY answered: fall back to fixed manual media */
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	/*
	 * NOTE(review): this frees the ring buffers but never calls
	 * dwc_gmac_free_dma_rings(), so the shared descriptor memory
	 * leaks on this path -- verify whether that is intentional.
	 */
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
222 1.1 martin
223 1.1 martin
224 1.1 martin
225 1.1 martin static int
226 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
227 1.1 martin {
228 1.1 martin size_t cnt;
229 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
230 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
231 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
232 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
233 1.1 martin & GMAC_BUSMODE_RESET) == 0)
234 1.1 martin return 0;
235 1.1 martin delay(10);
236 1.1 martin }
237 1.1 martin
238 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
239 1.1 martin return EIO;
240 1.1 martin }
241 1.1 martin
242 1.1 martin static void
243 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
244 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
245 1.1 martin {
246 1.1 martin uint32_t lo, hi;
247 1.1 martin
248 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
249 1.1 martin | (enaddr[3] << 24);
250 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
251 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
252 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
253 1.1 martin }
254 1.1 martin
255 1.1 martin static int
256 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
257 1.1 martin {
258 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
259 1.6 martin uint16_t mii;
260 1.1 martin size_t cnt;
261 1.1 martin int rv = 0;
262 1.1 martin
263 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
264 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
265 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
266 1.6 martin | GMAC_MII_BUSY;
267 1.1 martin
268 1.1 martin mutex_enter(&sc->sc_mdio_lock);
269 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
270 1.1 martin
271 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
272 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
273 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
274 1.3 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
275 1.3 martin AWIN_GMAC_MAC_MIIDATA);
276 1.1 martin break;
277 1.1 martin }
278 1.1 martin delay(10);
279 1.1 martin }
280 1.1 martin
281 1.1 martin mutex_exit(&sc->sc_mdio_lock);
282 1.1 martin
283 1.1 martin return rv;
284 1.1 martin }
285 1.1 martin
286 1.1 martin static void
287 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
288 1.1 martin {
289 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
290 1.6 martin uint16_t mii;
291 1.1 martin size_t cnt;
292 1.1 martin
293 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
294 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
295 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
296 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
297 1.1 martin
298 1.1 martin mutex_enter(&sc->sc_mdio_lock);
299 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
300 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
301 1.1 martin
302 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
303 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
304 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
305 1.1 martin break;
306 1.1 martin delay(10);
307 1.1 martin }
308 1.1 martin
309 1.1 martin mutex_exit(&sc->sc_mdio_lock);
310 1.1 martin }
311 1.1 martin
312 1.1 martin static int
313 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
314 1.1 martin struct dwc_gmac_rx_ring *ring)
315 1.1 martin {
316 1.1 martin struct dwc_gmac_rx_data *data;
317 1.1 martin bus_addr_t physaddr;
318 1.6 martin const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
319 1.1 martin int error, i, next;
320 1.1 martin
321 1.1 martin ring->r_cur = ring->r_next = 0;
322 1.1 martin memset(ring->r_desc, 0, descsize);
323 1.1 martin
324 1.1 martin /*
325 1.1 martin * Pre-allocate Rx buffers and populate Rx ring.
326 1.1 martin */
327 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
328 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
329 1.1 martin
330 1.1 martin data = &sc->sc_rxq.r_data[i];
331 1.1 martin
332 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
333 1.1 martin if (data->rd_m == NULL) {
334 1.1 martin aprint_error_dev(sc->sc_dev,
335 1.1 martin "could not allocate rx mbuf #%d\n", i);
336 1.1 martin error = ENOMEM;
337 1.1 martin goto fail;
338 1.1 martin }
339 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
340 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
341 1.1 martin if (error != 0) {
342 1.1 martin aprint_error_dev(sc->sc_dev,
343 1.1 martin "could not create DMA map\n");
344 1.1 martin data->rd_map = NULL;
345 1.1 martin goto fail;
346 1.1 martin }
347 1.1 martin MCLGET(data->rd_m, M_DONTWAIT);
348 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) {
349 1.1 martin aprint_error_dev(sc->sc_dev,
350 1.1 martin "could not allocate mbuf cluster #%d\n", i);
351 1.1 martin error = ENOMEM;
352 1.1 martin goto fail;
353 1.1 martin }
354 1.1 martin
355 1.1 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
356 1.1 martin mtod(data->rd_m, void *), MCLBYTES, NULL,
357 1.1 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
358 1.1 martin if (error != 0) {
359 1.1 martin aprint_error_dev(sc->sc_dev,
360 1.1 martin "could not load rx buf DMA map #%d", i);
361 1.1 martin goto fail;
362 1.1 martin }
363 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
364 1.1 martin
365 1.1 martin desc = &sc->sc_rxq.r_desc[i];
366 1.1 martin desc->ddesc_data = htole32(physaddr);
367 1.7 martin next = (i+1) % AWGE_RX_RING_COUNT;
368 1.1 martin desc->ddesc_next = htole32(ring->r_physaddr
369 1.1 martin + next * sizeof(*desc));
370 1.1 martin desc->ddesc_cntl = htole32(
371 1.6 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
372 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
373 1.1 martin }
374 1.1 martin
375 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
376 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
377 1.1 martin BUS_DMASYNC_PREREAD);
378 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
379 1.6 martin ring->r_physaddr);
380 1.1 martin
381 1.1 martin return 0;
382 1.1 martin
383 1.1 martin fail:
384 1.1 martin dwc_gmac_free_rx_ring(sc, ring);
385 1.1 martin return error;
386 1.1 martin }
387 1.1 martin
388 1.1 martin static void
389 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
390 1.1 martin struct dwc_gmac_rx_ring *ring)
391 1.1 martin {
392 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
393 1.1 martin int i;
394 1.1 martin
395 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
396 1.1 martin desc = &sc->sc_rxq.r_desc[i];
397 1.1 martin desc->ddesc_cntl = htole32(
398 1.6 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
399 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
400 1.1 martin }
401 1.1 martin
402 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
403 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
404 1.1 martin BUS_DMASYNC_PREWRITE);
405 1.1 martin
406 1.1 martin ring->r_cur = ring->r_next = 0;
407 1.1 martin }
408 1.1 martin
409 1.1 martin static int
410 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
411 1.1 martin {
412 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
413 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
414 1.1 martin int error, nsegs;
415 1.1 martin void *rings;
416 1.1 martin
417 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
418 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
419 1.1 martin if (error != 0) {
420 1.1 martin aprint_error_dev(sc->sc_dev,
421 1.1 martin "could not create desc DMA map\n");
422 1.1 martin sc->sc_dma_ring_map = NULL;
423 1.1 martin goto fail;
424 1.1 martin }
425 1.1 martin
426 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
427 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
428 1.1 martin if (error != 0) {
429 1.1 martin aprint_error_dev(sc->sc_dev,
430 1.1 martin "could not map DMA memory\n");
431 1.1 martin goto fail;
432 1.1 martin }
433 1.1 martin
434 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
435 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
436 1.1 martin if (error != 0) {
437 1.1 martin aprint_error_dev(sc->sc_dev,
438 1.1 martin "could not allocate DMA memory\n");
439 1.1 martin goto fail;
440 1.1 martin }
441 1.1 martin
442 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
443 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
444 1.1 martin if (error != 0) {
445 1.1 martin aprint_error_dev(sc->sc_dev,
446 1.1 martin "could not load desc DMA map\n");
447 1.1 martin goto fail;
448 1.1 martin }
449 1.1 martin
450 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
451 1.1 martin sc->sc_rxq.r_desc = rings;
452 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
453 1.1 martin
454 1.1 martin /* and next rings to the TX side */
455 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
456 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
457 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
458 1.1 martin
459 1.1 martin return 0;
460 1.1 martin
461 1.1 martin fail:
462 1.1 martin dwc_gmac_free_dma_rings(sc);
463 1.1 martin return error;
464 1.1 martin }
465 1.1 martin
466 1.1 martin static void
467 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
468 1.1 martin {
469 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
470 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
471 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
472 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
473 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
474 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
475 1.1 martin }
476 1.1 martin
477 1.1 martin static void
478 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
479 1.1 martin {
480 1.1 martin struct dwc_gmac_rx_data *data;
481 1.1 martin int i;
482 1.1 martin
483 1.1 martin if (ring->r_desc == NULL)
484 1.1 martin return;
485 1.1 martin
486 1.1 martin
487 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
488 1.1 martin data = &ring->r_data[i];
489 1.1 martin
490 1.1 martin if (data->rd_map != NULL) {
491 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
492 1.1 martin AWGE_RX_RING_COUNT
493 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
494 1.1 martin BUS_DMASYNC_POSTREAD);
495 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
496 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
497 1.1 martin }
498 1.1 martin if (data->rd_m != NULL)
499 1.1 martin m_freem(data->rd_m);
500 1.1 martin }
501 1.1 martin }
502 1.1 martin
503 1.1 martin static int
504 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
505 1.1 martin struct dwc_gmac_tx_ring *ring)
506 1.1 martin {
507 1.1 martin int i, error = 0;
508 1.1 martin
509 1.1 martin ring->t_queued = 0;
510 1.1 martin ring->t_cur = ring->t_next = 0;
511 1.1 martin
512 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
513 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
514 1.1 martin TX_DESC_OFFSET(0),
515 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
516 1.1 martin BUS_DMASYNC_POSTWRITE);
517 1.1 martin
518 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
519 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
520 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
521 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
522 1.1 martin &ring->t_data[i].td_map);
523 1.1 martin if (error != 0) {
524 1.1 martin aprint_error_dev(sc->sc_dev,
525 1.1 martin "could not create TX DMA map #%d\n", i);
526 1.1 martin ring->t_data[i].td_map = NULL;
527 1.1 martin goto fail;
528 1.1 martin }
529 1.1 martin ring->t_desc[i].ddesc_next = htole32(
530 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
531 1.7 martin *((i+1)%AWGE_TX_RING_COUNT));
532 1.1 martin }
533 1.1 martin
534 1.1 martin return 0;
535 1.1 martin
536 1.1 martin fail:
537 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
538 1.1 martin return error;
539 1.1 martin }
540 1.1 martin
541 1.1 martin static void
542 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
543 1.1 martin {
544 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
545 1.1 martin if (end > start) {
546 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
547 1.1 martin TX_DESC_OFFSET(start),
548 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
549 1.1 martin ops);
550 1.1 martin return;
551 1.1 martin }
552 1.1 martin /* sync from 'start' to end of ring */
553 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
554 1.1 martin TX_DESC_OFFSET(start),
555 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
556 1.1 martin ops);
557 1.1 martin /* sync from start of ring to 'end' */
558 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
559 1.1 martin TX_DESC_OFFSET(0),
560 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
561 1.1 martin ops);
562 1.1 martin }
563 1.1 martin
564 1.1 martin static void
565 1.1 martin dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
566 1.1 martin struct dwc_gmac_tx_ring *ring)
567 1.1 martin {
568 1.1 martin int i;
569 1.1 martin
570 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
571 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
572 1.1 martin
573 1.1 martin if (data->td_m != NULL) {
574 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
575 1.1 martin 0, data->td_active->dm_mapsize,
576 1.1 martin BUS_DMASYNC_POSTWRITE);
577 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
578 1.1 martin m_freem(data->td_m);
579 1.1 martin data->td_m = NULL;
580 1.1 martin }
581 1.1 martin }
582 1.1 martin
583 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
584 1.1 martin TX_DESC_OFFSET(0),
585 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
586 1.1 martin BUS_DMASYNC_PREWRITE);
587 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
588 1.6 martin sc->sc_txq.t_physaddr);
589 1.1 martin
590 1.1 martin ring->t_queued = 0;
591 1.1 martin ring->t_cur = ring->t_next = 0;
592 1.1 martin }
593 1.1 martin
594 1.1 martin static void
595 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
596 1.1 martin struct dwc_gmac_tx_ring *ring)
597 1.1 martin {
598 1.1 martin int i;
599 1.1 martin
600 1.1 martin /* unload the maps */
601 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
602 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
603 1.1 martin
604 1.1 martin if (data->td_m != NULL) {
605 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
606 1.1 martin 0, data->td_map->dm_mapsize,
607 1.1 martin BUS_DMASYNC_POSTWRITE);
608 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
609 1.1 martin m_freem(data->td_m);
610 1.1 martin data->td_m = NULL;
611 1.1 martin }
612 1.1 martin }
613 1.1 martin
614 1.1 martin /* and actually free them */
615 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
616 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
617 1.1 martin
618 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
619 1.1 martin }
620 1.1 martin }
621 1.1 martin
622 1.1 martin static void
623 1.1 martin dwc_gmac_miibus_statchg(struct ifnet *ifp)
624 1.1 martin {
625 1.1 martin struct dwc_gmac_softc * const sc = ifp->if_softc;
626 1.1 martin struct mii_data * const mii = &sc->sc_mii;
627 1.1 martin
628 1.1 martin /*
629 1.1 martin * Set MII or GMII interface based on the speed
630 1.1 martin * negotiated by the PHY.
631 1.1 martin */
632 1.1 martin switch (IFM_SUBTYPE(mii->mii_media_active)) {
633 1.1 martin case IFM_10_T:
634 1.1 martin case IFM_100_TX:
635 1.1 martin /* XXX */
636 1.1 martin break;
637 1.1 martin case IFM_1000_T:
638 1.1 martin /* XXX */
639 1.1 martin break;
640 1.1 martin }
641 1.1 martin }
642 1.1 martin
/*
 * ifnet if_init callback: (re)initialize the interface. Stops any
 * running state, points the DMA engine at the descriptor rings and
 * starts reception. Always returns 0.
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* nothing to do if we are already up */
	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part (TX is only started once packets are queued
	 * in dwc_gmac_start())
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
672 1.1 martin
/*
 * ifnet if_start callback: move packets from the send queue onto the
 * TX descriptor ring and kick the DMA engine when anything was queued.
 */
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;	/* remember fill level to detect work */
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		/* ring full (or map failure): leave the packet queued */
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* flush the TX fifo, then (re)start transmission */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE,
		    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
	}

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
#endif
}
715 1.1 martin
/*
 * ifnet if_stop callback: halt the RX and TX DMA engines, flush the
 * TX fifo, take the MII layer down and reset both rings.
 * The 'disable' argument is currently unused.
 */
static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* stop both DMA directions */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
		& ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	/* and drop anything still sitting in the TX fifo */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}
735 1.1 martin
736 1.1 martin /*
737 1.1 martin * Add m0 to the TX ring
738 1.1 martin */
739 1.1 martin static int
740 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
741 1.1 martin {
742 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
743 1.1 martin struct dwc_gmac_tx_data *data = NULL;
744 1.1 martin bus_dmamap_t map;
745 1.6 martin uint32_t flags, len;
746 1.1 martin int error, i, first;
747 1.1 martin
748 1.1 martin first = sc->sc_txq.t_cur;
749 1.1 martin map = sc->sc_txq.t_data[first].td_map;
750 1.1 martin flags = 0;
751 1.1 martin
752 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
753 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
754 1.1 martin if (error != 0) {
755 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
756 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
757 1.1 martin return error;
758 1.1 martin }
759 1.1 martin
760 1.1 martin if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
761 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
762 1.1 martin return ENOBUFS;
763 1.1 martin }
764 1.1 martin
765 1.1 martin data = NULL;
766 1.6 martin flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXINT|DDESC_CNTL_TXCHAIN;
767 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
768 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
769 1.7 martin
770 1.7 martin #ifdef DWC_GMAC_DEBUG
771 1.7 martin aprint_normal_dev(sc->sc_dev, "enqueing desc #%d data %08lx "
772 1.7 martin "len %lu\n", sc->sc_txq.t_cur,
773 1.7 martin (unsigned long)map->dm_segs[i].ds_addr,
774 1.7 martin (unsigned long)map->dm_segs[i].ds_len);
775 1.7 martin #endif
776 1.7 martin
777 1.1 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
778 1.1 martin
779 1.1 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
780 1.6 martin len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
781 1.6 martin if (i == map->dm_nsegs-1)
782 1.6 martin flags |= DDESC_CNTL_TXLAST;
783 1.6 martin desc->ddesc_cntl = htole32(len|flags);
784 1.6 martin flags &= ~DDESC_CNTL_TXFIRST;
785 1.1 martin
786 1.1 martin /*
787 1.1 martin * Defer passing ownership of the first descriptor
788 1.1 martin * untill we are done.
789 1.1 martin */
790 1.6 martin if (i)
791 1.6 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
792 1.6 martin sc->sc_txq.t_queued++;
793 1.1 martin
794 1.1 martin sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
795 1.1 martin & (AWGE_TX_RING_COUNT-1);
796 1.1 martin }
797 1.1 martin
798 1.6 martin /* Pass first to device */
799 1.6 martin sc->sc_txq.t_desc[first].ddesc_status
800 1.6 martin = htole32(DDESC_STATUS_OWNEDBYDEV);
801 1.1 martin
802 1.1 martin data->td_m = m0;
803 1.1 martin data->td_active = map;
804 1.1 martin
805 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
806 1.1 martin BUS_DMASYNC_PREWRITE);
807 1.1 martin
808 1.1 martin return 0;
809 1.1 martin }
810 1.1 martin
/*
 * ifnet ioctl handler.  Runs at splnet(); everything not handled
 * explicitly is passed through to ether_ioctl().
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	// struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		/* Bring the interface up before configuring the address. */
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		/* FALLTHROUGH: let ether_ioctl() see SIOCINITIFADDR too. */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		/*
		 * ENETRESET means the multicast filter needs reprogramming;
		 * not implemented yet, so just report success.
		 */
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;
		break;
	}

	splx(s);

	return error;
}
848 1.1 martin
849 1.1 martin int
850 1.1 martin dwc_gmac_intr(struct dwc_gmac_softc *sc)
851 1.1 martin {
852 1.1 martin uint32_t status, dma_status;
853 1.1 martin
854 1.1 martin status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
855 1.2 martin if (status & AWIN_GMAC_MII_IRQ) {
856 1.1 martin (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
857 1.1 martin AWIN_GMAC_MII_STATUS);
858 1.2 martin mii_pollstat(&sc->sc_mii);
859 1.2 martin }
860 1.1 martin
861 1.1 martin dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
862 1.1 martin AWIN_GMAC_DMA_STATUS);
863 1.1 martin
864 1.1 martin printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
865 1.1 martin status, dma_status);
866 1.1 martin
867 1.1 martin static size_t cnt = 0;
868 1.1 martin if (++cnt > 20)
869 1.1 martin panic("enough now");
870 1.1 martin
871 1.1 martin return 1;
872 1.1 martin }
873 1.7 martin
874 1.7 martin #ifdef DWC_GMAC_DEBUG
875 1.7 martin static void
876 1.7 martin dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
877 1.7 martin {
878 1.7 martin aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
879 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
880 1.7 martin aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
881 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
882 1.7 martin aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
883 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
884 1.7 martin aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
885 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
886 1.7 martin aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
887 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
888 1.7 martin aprint_normal_dev(sc->sc_dev, "status: %08x\n",
889 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
890 1.7 martin aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
891 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
892 1.7 martin aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
893 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
894 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
895 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
896 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
897 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
898 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
899 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
900 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
901 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
902 1.7 martin }
903 1.7 martin
904 1.7 martin static void
905 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
906 1.7 martin {
907 1.7 martin int i;
908 1.7 martin
909 1.7 martin aprint_normal_dev(sc->sc_dev, " TX DMA descriptors:\n");
910 1.7 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
911 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
912 1.7 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x data: %08x next: %08x\n",
913 1.7 martin i, sc->sc_txq.t_physaddr + i*sizeof(struct dwc_gmac_dev_dmadesc),
914 1.7 martin le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
915 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
916 1.7 martin
917 1.7 martin }
918 1.7 martin }
919 1.7 martin #endif
920