dwc_gmac.c revision 1.6 1 1.1 martin /*-
2 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
3 1.1 martin * All rights reserved.
4 1.1 martin *
5 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
6 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
7 1.1 martin *
8 1.1 martin * Redistribution and use in source and binary forms, with or without
9 1.1 martin * modification, are permitted provided that the following conditions
10 1.1 martin * are met:
11 1.1 martin * 1. Redistributions of source code must retain the above copyright
12 1.1 martin * notice, this list of conditions and the following disclaimer.
13 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer in the
15 1.1 martin * documentation and/or other materials provided with the distribution.
16 1.1 martin *
17 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
28 1.1 martin */
29 1.1 martin
 30      1.1  martin /*
 31      1.1  martin  * This driver supports the Synopsys DesignWare GMAC core, as found
 32      1.1  martin  * on Allwinner A20 cores and others.
 33      1.1  martin  *
 34      1.1  martin  * Real documentation seems to not be available; the marketing product
 35      1.1  martin  * documents can be found here:
 36      1.1  martin  *
 37      1.1  martin  *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 38      1.1  martin  */
39 1.1 martin
40 1.1 martin #include <sys/cdefs.h>
41 1.1 martin
42 1.6 martin __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.6 2014/09/14 11:00:52 martin Exp $");
43 1.1 martin
44 1.1 martin #include "opt_inet.h"
45 1.1 martin
46 1.1 martin #include <sys/param.h>
47 1.1 martin #include <sys/bus.h>
48 1.1 martin #include <sys/device.h>
49 1.1 martin #include <sys/intr.h>
50 1.1 martin #include <sys/systm.h>
51 1.1 martin #include <sys/sockio.h>
52 1.1 martin
53 1.1 martin #include <net/if.h>
54 1.1 martin #include <net/if_ether.h>
55 1.1 martin #include <net/if_media.h>
56 1.1 martin #include <net/bpf.h>
57 1.1 martin #ifdef INET
58 1.1 martin #include <netinet/if_inarp.h>
59 1.1 martin #endif
60 1.1 martin
61 1.1 martin #include <dev/mii/miivar.h>
62 1.1 martin
63 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
64 1.1 martin #include <dev/ic/dwc_gmac_var.h>
65 1.1 martin
66 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
67 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
68 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
69 1.1 martin
70 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
71 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
72 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
73 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
74 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
75 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
76 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
77 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
78 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
79 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
80 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
81 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
82 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
83 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
84 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
85 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
86 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
87 1.1 martin
88 1.1 martin
/*
 * Byte offset of TX descriptor N inside the shared descriptor DMA area:
 * the AWGE_RX_RING_COUNT RX descriptors come first, the TX descriptors
 * follow directly after them.
 */
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))

/* Byte offset of RX descriptor N (the RX ring starts at offset 0) */
#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
93 1.1 martin
/*
 * Bus-independent attach routine, called by the bus frontend after it
 * has mapped the registers and set up sc_bst/sc_bsh/sc_dmat/sc_dev.
 *
 * Determines the MAC address (device property or current filter
 * registers), resets the core, allocates the descriptor rings, attaches
 * the network interface and MII bus and finally enables interrupts -
 * so the caller must have established the interrupt handler already.
 *
 * mii_clk: MDIO clock divider selection; only the low 3 bits are used.
 */
void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	/* only 3 bits select the MDIO clock divider */
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI);
		/* the filter registers hold the address in little endian order */
		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	/* the reset cleared the filter registers, restore the address */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		/* no PHY - provide a fixed manual media entry */
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Enable interrupts
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR, AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE, GMAC_DEF_DMA_INT_MASK);

	return;

fail:
	/*
	 * NOTE(review): this path never calls dwc_gmac_free_dma_rings(),
	 * so the descriptor memory leaks on a failed attach - confirm
	 * and add the cleanup.
	 */
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}
210 1.1 martin
211 1.1 martin
212 1.1 martin
213 1.1 martin static int
214 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
215 1.1 martin {
216 1.1 martin size_t cnt;
217 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
218 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
219 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
220 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
221 1.1 martin & GMAC_BUSMODE_RESET) == 0)
222 1.1 martin return 0;
223 1.1 martin delay(10);
224 1.1 martin }
225 1.1 martin
226 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
227 1.1 martin return EIO;
228 1.1 martin }
229 1.1 martin
230 1.1 martin static void
231 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
232 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
233 1.1 martin {
234 1.1 martin uint32_t lo, hi;
235 1.1 martin
236 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
237 1.1 martin | (enaddr[3] << 24);
238 1.1 martin hi = enaddr[4] | (enaddr[5] << 8);
239 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
240 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
241 1.1 martin }
242 1.1 martin
243 1.1 martin static int
244 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
245 1.1 martin {
246 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
247 1.6 martin uint16_t mii;
248 1.1 martin size_t cnt;
249 1.1 martin int rv = 0;
250 1.1 martin
251 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
252 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
253 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
254 1.6 martin | GMAC_MII_BUSY;
255 1.1 martin
256 1.1 martin mutex_enter(&sc->sc_mdio_lock);
257 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
258 1.1 martin
259 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
260 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
261 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
262 1.3 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
263 1.3 martin AWIN_GMAC_MAC_MIIDATA);
264 1.1 martin break;
265 1.1 martin }
266 1.1 martin delay(10);
267 1.1 martin }
268 1.1 martin
269 1.1 martin mutex_exit(&sc->sc_mdio_lock);
270 1.1 martin
271 1.1 martin return rv;
272 1.1 martin }
273 1.1 martin
274 1.1 martin static void
275 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
276 1.1 martin {
277 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
278 1.6 martin uint16_t mii;
279 1.1 martin size_t cnt;
280 1.1 martin
281 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
282 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
283 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
284 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
285 1.1 martin
286 1.1 martin mutex_enter(&sc->sc_mdio_lock);
287 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
288 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
289 1.1 martin
290 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
291 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
292 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
293 1.1 martin break;
294 1.1 martin delay(10);
295 1.1 martin }
296 1.1 martin
297 1.1 martin mutex_exit(&sc->sc_mdio_lock);
298 1.1 martin }
299 1.1 martin
300 1.1 martin static int
301 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
302 1.1 martin struct dwc_gmac_rx_ring *ring)
303 1.1 martin {
304 1.1 martin struct dwc_gmac_rx_data *data;
305 1.1 martin bus_addr_t physaddr;
306 1.6 martin const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
307 1.1 martin int error, i, next;
308 1.1 martin
309 1.1 martin ring->r_cur = ring->r_next = 0;
310 1.1 martin memset(ring->r_desc, 0, descsize);
311 1.1 martin
312 1.1 martin /*
313 1.1 martin * Pre-allocate Rx buffers and populate Rx ring.
314 1.1 martin */
315 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
316 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
317 1.1 martin
318 1.1 martin data = &sc->sc_rxq.r_data[i];
319 1.1 martin
320 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
321 1.1 martin if (data->rd_m == NULL) {
322 1.1 martin aprint_error_dev(sc->sc_dev,
323 1.1 martin "could not allocate rx mbuf #%d\n", i);
324 1.1 martin error = ENOMEM;
325 1.1 martin goto fail;
326 1.1 martin }
327 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
328 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
329 1.1 martin if (error != 0) {
330 1.1 martin aprint_error_dev(sc->sc_dev,
331 1.1 martin "could not create DMA map\n");
332 1.1 martin data->rd_map = NULL;
333 1.1 martin goto fail;
334 1.1 martin }
335 1.1 martin MCLGET(data->rd_m, M_DONTWAIT);
336 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) {
337 1.1 martin aprint_error_dev(sc->sc_dev,
338 1.1 martin "could not allocate mbuf cluster #%d\n", i);
339 1.1 martin error = ENOMEM;
340 1.1 martin goto fail;
341 1.1 martin }
342 1.1 martin
343 1.1 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
344 1.1 martin mtod(data->rd_m, void *), MCLBYTES, NULL,
345 1.1 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
346 1.1 martin if (error != 0) {
347 1.1 martin aprint_error_dev(sc->sc_dev,
348 1.1 martin "could not load rx buf DMA map #%d", i);
349 1.1 martin goto fail;
350 1.1 martin }
351 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
352 1.1 martin
353 1.1 martin desc = &sc->sc_rxq.r_desc[i];
354 1.1 martin desc->ddesc_data = htole32(physaddr);
355 1.6 martin next = (i+1) & AWGE_RX_RING_COUNT;
356 1.1 martin desc->ddesc_next = htole32(ring->r_physaddr
357 1.1 martin + next * sizeof(*desc));
358 1.1 martin desc->ddesc_cntl = htole32(
359 1.6 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
360 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
361 1.1 martin }
362 1.1 martin
363 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
364 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
365 1.1 martin BUS_DMASYNC_PREREAD);
366 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
367 1.6 martin ring->r_physaddr);
368 1.1 martin
369 1.1 martin return 0;
370 1.1 martin
371 1.1 martin fail:
372 1.1 martin dwc_gmac_free_rx_ring(sc, ring);
373 1.1 martin return error;
374 1.1 martin }
375 1.1 martin
376 1.1 martin static void
377 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
378 1.1 martin struct dwc_gmac_rx_ring *ring)
379 1.1 martin {
380 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
381 1.1 martin int i;
382 1.1 martin
383 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
384 1.1 martin desc = &sc->sc_rxq.r_desc[i];
385 1.1 martin desc->ddesc_cntl = htole32(
386 1.6 martin __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK));
387 1.1 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
388 1.1 martin }
389 1.1 martin
390 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
391 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
392 1.1 martin BUS_DMASYNC_PREWRITE);
393 1.1 martin
394 1.1 martin ring->r_cur = ring->r_next = 0;
395 1.1 martin }
396 1.1 martin
397 1.1 martin static int
398 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
399 1.1 martin {
400 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
401 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
402 1.1 martin int error, nsegs;
403 1.1 martin void *rings;
404 1.1 martin
405 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
406 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
407 1.1 martin if (error != 0) {
408 1.1 martin aprint_error_dev(sc->sc_dev,
409 1.1 martin "could not create desc DMA map\n");
410 1.1 martin sc->sc_dma_ring_map = NULL;
411 1.1 martin goto fail;
412 1.1 martin }
413 1.1 martin
414 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
415 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
416 1.1 martin if (error != 0) {
417 1.1 martin aprint_error_dev(sc->sc_dev,
418 1.1 martin "could not map DMA memory\n");
419 1.1 martin goto fail;
420 1.1 martin }
421 1.1 martin
422 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
423 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
424 1.1 martin if (error != 0) {
425 1.1 martin aprint_error_dev(sc->sc_dev,
426 1.1 martin "could not allocate DMA memory\n");
427 1.1 martin goto fail;
428 1.1 martin }
429 1.1 martin
430 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
431 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
432 1.1 martin if (error != 0) {
433 1.1 martin aprint_error_dev(sc->sc_dev,
434 1.1 martin "could not load desc DMA map\n");
435 1.1 martin goto fail;
436 1.1 martin }
437 1.1 martin
438 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
439 1.1 martin sc->sc_rxq.r_desc = rings;
440 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
441 1.1 martin
442 1.1 martin /* and next rings to the TX side */
443 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
444 1.1 martin sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
445 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
446 1.1 martin
447 1.1 martin return 0;
448 1.1 martin
449 1.1 martin fail:
450 1.1 martin dwc_gmac_free_dma_rings(sc);
451 1.1 martin return error;
452 1.1 martin }
453 1.1 martin
454 1.1 martin static void
455 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
456 1.1 martin {
457 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
458 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
459 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
460 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
461 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
462 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
463 1.1 martin }
464 1.1 martin
465 1.1 martin static void
466 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
467 1.1 martin {
468 1.1 martin struct dwc_gmac_rx_data *data;
469 1.1 martin int i;
470 1.1 martin
471 1.1 martin if (ring->r_desc == NULL)
472 1.1 martin return;
473 1.1 martin
474 1.1 martin
475 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
476 1.1 martin data = &ring->r_data[i];
477 1.1 martin
478 1.1 martin if (data->rd_map != NULL) {
479 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
480 1.1 martin AWGE_RX_RING_COUNT
481 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
482 1.1 martin BUS_DMASYNC_POSTREAD);
483 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
484 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
485 1.1 martin }
486 1.1 martin if (data->rd_m != NULL)
487 1.1 martin m_freem(data->rd_m);
488 1.1 martin }
489 1.1 martin }
490 1.1 martin
491 1.1 martin static int
492 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
493 1.1 martin struct dwc_gmac_tx_ring *ring)
494 1.1 martin {
495 1.1 martin int i, error = 0;
496 1.1 martin
497 1.1 martin ring->t_queued = 0;
498 1.1 martin ring->t_cur = ring->t_next = 0;
499 1.1 martin
500 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
501 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
502 1.1 martin TX_DESC_OFFSET(0),
503 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
504 1.1 martin BUS_DMASYNC_POSTWRITE);
505 1.1 martin
506 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
507 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
508 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
509 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
510 1.1 martin &ring->t_data[i].td_map);
511 1.1 martin if (error != 0) {
512 1.1 martin aprint_error_dev(sc->sc_dev,
513 1.1 martin "could not create TX DMA map #%d\n", i);
514 1.1 martin ring->t_data[i].td_map = NULL;
515 1.1 martin goto fail;
516 1.1 martin }
517 1.1 martin ring->t_desc[i].ddesc_next = htole32(
518 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
519 1.1 martin *((i+1)&AWGE_TX_RING_COUNT));
520 1.1 martin }
521 1.1 martin
522 1.1 martin return 0;
523 1.1 martin
524 1.1 martin fail:
525 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
526 1.1 martin return error;
527 1.1 martin }
528 1.1 martin
529 1.1 martin static void
530 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
531 1.1 martin {
532 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
533 1.1 martin if (end > start) {
534 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
535 1.1 martin TX_DESC_OFFSET(start),
536 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
537 1.1 martin ops);
538 1.1 martin return;
539 1.1 martin }
540 1.1 martin /* sync from 'start' to end of ring */
541 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
542 1.1 martin TX_DESC_OFFSET(start),
543 1.1 martin TX_DESC_OFFSET(AWGE_TX_RING_COUNT+1)-TX_DESC_OFFSET(start),
544 1.1 martin ops);
545 1.1 martin /* sync from start of ring to 'end' */
546 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
547 1.1 martin TX_DESC_OFFSET(0),
548 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
549 1.1 martin ops);
550 1.1 martin }
551 1.1 martin
552 1.1 martin static void
553 1.1 martin dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
554 1.1 martin struct dwc_gmac_tx_ring *ring)
555 1.1 martin {
556 1.1 martin int i;
557 1.1 martin
558 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
559 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
560 1.1 martin
561 1.1 martin if (data->td_m != NULL) {
562 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
563 1.1 martin 0, data->td_active->dm_mapsize,
564 1.1 martin BUS_DMASYNC_POSTWRITE);
565 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
566 1.1 martin m_freem(data->td_m);
567 1.1 martin data->td_m = NULL;
568 1.1 martin }
569 1.1 martin }
570 1.1 martin
571 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
572 1.1 martin TX_DESC_OFFSET(0),
573 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
574 1.1 martin BUS_DMASYNC_PREWRITE);
575 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
576 1.6 martin sc->sc_txq.t_physaddr);
577 1.1 martin
578 1.1 martin ring->t_queued = 0;
579 1.1 martin ring->t_cur = ring->t_next = 0;
580 1.1 martin }
581 1.1 martin
582 1.1 martin static void
583 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
584 1.1 martin struct dwc_gmac_tx_ring *ring)
585 1.1 martin {
586 1.1 martin int i;
587 1.1 martin
588 1.1 martin /* unload the maps */
589 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
590 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
591 1.1 martin
592 1.1 martin if (data->td_m != NULL) {
593 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
594 1.1 martin 0, data->td_map->dm_mapsize,
595 1.1 martin BUS_DMASYNC_POSTWRITE);
596 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
597 1.1 martin m_freem(data->td_m);
598 1.1 martin data->td_m = NULL;
599 1.1 martin }
600 1.1 martin }
601 1.1 martin
602 1.1 martin /* and actually free them */
603 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
604 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
605 1.1 martin
606 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
607 1.1 martin }
608 1.1 martin }
609 1.1 martin
/*
 * MII status-change callback: adjust the MAC to the speed/duplex the
 * PHY negotiated.  Currently a stub - the actual MII/GMII interface
 * selection is not implemented yet (see the XXX markers).
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		/* XXX */
		break;
	case IFM_1000_T:
		/* XXX */
		break;
	}
}
630 1.1 martin
/*
 * if_init callback: bring the interface up.  Programs the descriptor
 * ring base addresses into the DMA engine and starts RX; TX is only
 * kicked on demand from dwc_gmac_start().
 * Returns 0 (also when already running).
 */
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* nothing to do if we are already up */
	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	/* quiesce the DMA engines and drain the rings first */
	dwc_gmac_stop(ifp, 0);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
660 1.1 martin
661 1.1 martin static void
662 1.1 martin dwc_gmac_start(struct ifnet *ifp)
663 1.1 martin {
664 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
665 1.1 martin int old = sc->sc_txq.t_queued;
666 1.1 martin struct mbuf *m0;
667 1.1 martin
668 1.1 martin if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
669 1.1 martin return;
670 1.1 martin
671 1.1 martin for (;;) {
672 1.1 martin IFQ_POLL(&ifp->if_snd, m0);
673 1.1 martin if (m0 == NULL)
674 1.1 martin break;
675 1.1 martin if (dwc_gmac_queue(sc, m0) != 0) {
676 1.1 martin ifp->if_flags |= IFF_OACTIVE;
677 1.1 martin break;
678 1.1 martin }
679 1.1 martin IFQ_DEQUEUE(&ifp->if_snd, m0);
680 1.1 martin bpf_mtap(ifp, m0);
681 1.1 martin }
682 1.1 martin
683 1.1 martin if (sc->sc_txq.t_queued != old) {
684 1.1 martin /* packets have been queued, kick it off */
685 1.1 martin dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
686 1.1 martin BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
687 1.6 martin
688 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
689 1.6 martin AWIN_GMAC_DMA_OPMODE,
690 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
691 1.6 martin AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
692 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
693 1.6 martin AWIN_GMAC_DMA_OPMODE,
694 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
695 1.6 martin AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_TXSTART);
696 1.1 martin }
697 1.1 martin }
698 1.1 martin
699 1.1 martin static void
700 1.1 martin dwc_gmac_stop(struct ifnet *ifp, int disable)
701 1.1 martin {
702 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
703 1.1 martin
704 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
705 1.6 martin AWIN_GMAC_DMA_OPMODE,
706 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
707 1.6 martin AWIN_GMAC_DMA_OPMODE)
708 1.6 martin & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
709 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
710 1.6 martin AWIN_GMAC_DMA_OPMODE,
711 1.6 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh,
712 1.6 martin AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);
713 1.6 martin
714 1.1 martin mii_down(&sc->sc_mii);
715 1.1 martin dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
716 1.1 martin dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
717 1.1 martin }
718 1.1 martin
719 1.1 martin /*
720 1.1 martin * Add m0 to the TX ring
721 1.1 martin */
722 1.1 martin static int
723 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
724 1.1 martin {
725 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
726 1.1 martin struct dwc_gmac_tx_data *data = NULL;
727 1.1 martin bus_dmamap_t map;
728 1.6 martin uint32_t flags, len;
729 1.1 martin int error, i, first;
730 1.1 martin
731 1.1 martin first = sc->sc_txq.t_cur;
732 1.1 martin map = sc->sc_txq.t_data[first].td_map;
733 1.1 martin flags = 0;
734 1.1 martin
735 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
736 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
737 1.1 martin if (error != 0) {
738 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
739 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
740 1.1 martin return error;
741 1.1 martin }
742 1.1 martin
743 1.1 martin if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
744 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
745 1.1 martin return ENOBUFS;
746 1.1 martin }
747 1.1 martin
748 1.1 martin data = NULL;
749 1.6 martin flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXINT|DDESC_CNTL_TXCHAIN;
750 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
751 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
752 1.1 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
753 1.1 martin
754 1.1 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
755 1.6 martin len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
756 1.6 martin if (i == map->dm_nsegs-1)
757 1.6 martin flags |= DDESC_CNTL_TXLAST;
758 1.6 martin desc->ddesc_cntl = htole32(len|flags);
759 1.6 martin flags &= ~DDESC_CNTL_TXFIRST;
760 1.1 martin
761 1.1 martin /*
762 1.1 martin * Defer passing ownership of the first descriptor
763 1.1 martin * untill we are done.
764 1.1 martin */
765 1.6 martin if (i)
766 1.6 martin desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
767 1.6 martin sc->sc_txq.t_queued++;
768 1.1 martin
769 1.1 martin sc->sc_txq.t_cur = (sc->sc_txq.t_cur + 1)
770 1.1 martin & (AWGE_TX_RING_COUNT-1);
771 1.1 martin }
772 1.1 martin
773 1.6 martin /* Pass first to device */
774 1.6 martin sc->sc_txq.t_desc[first].ddesc_status
775 1.6 martin = htole32(DDESC_STATUS_OWNEDBYDEV);
776 1.1 martin
777 1.1 martin data->td_m = m0;
778 1.1 martin data->td_active = map;
779 1.1 martin
780 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
781 1.1 martin BUS_DMASYNC_PREWRITE);
782 1.1 martin
783 1.1 martin return 0;
784 1.1 martin }
785 1.1 martin
/*
 * if_ioctl handler.  SIOCINITIFADDR brings the interface up and does
 * the protocol-specific address setup; everything else is delegated to
 * ether_ioctl().  Multicast filter reprogramming on ENETRESET is
 * acknowledged but not implemented yet.
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
//	struct dwc_gmac_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		dwc_gmac_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		/*
		 * NOTE(review): no break here - SIOCINITIFADDR also falls
		 * through into ether_ioctl() below.  Confirm this is
		 * intentional and mark it FALLTHROUGH, or add a break.
		 */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			/* setmulti */;	/* XXX: filter update not implemented */
		break;
	}

	splx(s);

	return error;
}
823 1.1 martin
/*
 * Interrupt service routine, called by the bus frontend.
 * Currently only acknowledges MII link interrupts and logs the raw
 * MAC/DMA interrupt status; RX/TX completion handling is not
 * implemented yet.  Always returns 1 (interrupt claimed).
 */
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		/* reading the MII status register acknowledges the IRQ */
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	/*
	 * NOTE(review): the printf below and the counter/panic are
	 * debugging scaffolding from bring-up and must be removed
	 * before production use.
	 */
	printf("%s: INTR status: %08x, DMA status: %08x\n", device_xname(sc->sc_dev),
	    status, dma_status);

	static size_t cnt = 0;
	if (++cnt > 20)
		panic("enough now");

	return 1;
}
848