/* $NetBSD: dwc_gmac.c,v 1.24.2.4 2014/12/09 19:17:50 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product page can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.24.2.4 2014/12/09 19:17:50 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
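
/*
 * Both descriptor rings live in a single shared DMA area (see
 * dwc_gmac_alloc_dma_rings): the first AWGE_RX_RING_COUNT descriptors
 * belong to the RX ring and the following AWGE_TX_RING_COUNT to the
 * TX ring, hence the AWGE_RX_RING_COUNT bias in TX_DESC_OFFSET().
 * The TX_NEXT/RX_NEXT wrap masks assume both ring sizes are powers
 * of two.
 */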

#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE|	\
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE|	\
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE|	\
				GMAC_DMA_INT_FBE|			\
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE|	\
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE|	\
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG |	\
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
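			/* (first octet 0xf2: locally administered, unicast) */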
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return;	/* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;


	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, old, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

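/*
 * Program the 64-bit multicast hash filter.  For each multicast address
 * the hardware bit index is derived from the CRC32 of the address (see
 * the computation of 'h' below): bit 5 of 'h' selects HTLOW vs. HTHIGH
 * and the low 5 bits select the bit within that register.  Address
 * ranges cannot be hashed and fall back to ALLMULTI.
 */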
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
			i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif