/* $NetBSD: dwc_gmac.c,v 1.24.2.5 2015/01/07 21:12:04 msaitoh Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be available; the marketing
 * product documents can be found here:
 *
 *  http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */
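/*
 * Typical use (a sketch, not taken from this file): a bus front-end maps
 * the device registers, fills in sc_dev/sc_bst/sc_bsh/sc_dmat of the
 * embedded struct dwc_gmac_softc, establishes its interrupt handler and
 * then calls dwc_gmac_attach(sc, mii_clk).  The interrupt handler simply
 * forwards to dwc_gmac_intr(sc).  The front-end name and its softc member
 * below are hypothetical:
 *
 *	static int
 *	mybus_gmac_intr(void *arg)
 *	{
 *		struct mybus_gmac_softc *msc = arg;
 *
 *		return dwc_gmac_intr(&msc->sc_core);
 *	}
 */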

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.24.2.5 2015/01/07 21:12:04 msaitoh Exp $");

/* #define DWC_GMAC_DEBUG 1 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int);
static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN]);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
static int dwc_gmac_init(struct ifnet *ifp);
static void dwc_gmac_stop(struct ifnet *ifp, int disable);
static void dwc_gmac_start(struct ifnet *ifp);
static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t x);

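/*
 * The RX and TX descriptors live back to back in one DMA allocation:
 * AWGE_RX_RING_COUNT RX descriptors first, followed by AWGE_TX_RING_COUNT
 * TX descriptors (see dwc_gmac_alloc_dma_rings()).  The macros below
 * compute byte offsets into that shared map and the ring-wrapping
 * successor index for each ring.
 */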
#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
				GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
				GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
				GMAC_DMA_INT_FBE| \
				GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
				GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG | AWIN_GMAC_MAC_INT_RGSMII)


#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
static void dwc_dump_status(struct dwc_gmac_softc *sc);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
#endif

void
dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int s;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return; /* not much to cleanup, haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address: %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Ready, attach interface
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);

	/*
	 * Enable interrupts
	 */
	s = splnet();
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	splx(s);

	return;

fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
}

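/*
 * Soft-reset the DMA engine by setting GMAC_BUSMODE_RESET and polling
 * (up to ~30ms) for the bit to clear; returns EIO on timeout.
 */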
static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

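/*
 * Program the primary MAC address (filter slot 0): the low four bytes of
 * enaddr go into ADDR0LO, the remaining two into ADDR0HI.
 */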
static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t lo, hi;

	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	hi = enaddr[4] | (enaddr[5] << 8);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
}

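/*
 * MII read access: load the PHY address, register number and MDC clock
 * divider into MIIADDR together with GMAC_MII_BUSY, poll until the busy
 * bit clears, then fetch the result from MIIDATA.  Serialized by
 * sc_mdio_lock.
 */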
static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;
	int rv = 0;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	return rv;
}

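/*
 * MII write access: like the read path above, but MIIDATA is loaded with
 * the value first and GMAC_MII_WRITE is set in the command word.
 */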
static void
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg,GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);
}

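/*
 * Populate the RX ring: for each of the AWGE_RX_RING_COUNT slots allocate
 * an mbuf cluster, create and load a DMA map for it, chain the descriptor
 * to its successor and hand ownership to the device.  Finally point the
 * DMA engine's RX descriptor base register at the start of the ring.
 */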
static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(data->rd_m, void *), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d", i);
			goto fail;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	int i;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
}

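/*
 * Allocate one physically contiguous, coherent chunk of memory holding
 * all RX and TX descriptors and load it into sc_dma_ring_map; the RX ring
 * gets the first AWGE_RX_RING_COUNT descriptors, the TX ring the rest.
 */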
static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
	    sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    data->rd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

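/*
 * Initialize the TX ring: clear all descriptors, create a (multi-segment)
 * DMA map per slot for outgoing mbuf chains and pre-link each descriptor's
 * next pointer so the ring forms a chained list the device can follow.
 */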
static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

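/*
 * bus_dmamap_sync() the TX descriptors in the half-open range
 * [start, end); if the range wraps past the end of the ring the sync is
 * done in two pieces, tail first and then from the start of the ring.
 */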
static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
	    ops);
}

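/*
 * Return the TX ring to its post-attach state: free any mbufs still
 * associated with descriptors, sync the ring and point the DMA engine's
 * TX descriptor base register back at the first descriptor.
 */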
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
    struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX)
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

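/*
 * if_init handler: (re)configure the DMA bus mode, the address/frame
 * filters and the multicast hash, reload the RX/TX descriptor base
 * registers and start both DMA engines in store-and-forward mode.
 */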
static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST |
	    __SHIFTIN(GMAC_BUSMODE_PRIORXTX_41, GMAC_BUSMODE_PRIORXTX) |
	    __SHIFTIN(8, GMCA_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE, GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART |
	    GMAC_DMA_OP_STOREFORWARD);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

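/*
 * if_start handler: drain the interface send queue into the TX ring via
 * dwc_gmac_queue() (setting IFF_OACTIVE when the ring fills up), then
 * sync the newly written descriptors and poke the TX poll-demand
 * register so the DMA engine re-reads them.
 */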
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	uint32_t flags, len;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;
	flags = 0;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs >= AWGE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	data = NULL;
	flags = DDESC_CNTL_TXFIRST|DDESC_CNTL_TXCHAIN;
	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
		len = __SHIFTIN(map->dm_segs[i].ds_len,DDESC_CNTL_SIZE1MASK);
		if (i == map->dm_nsegs-1)
			flags |= DDESC_CNTL_TXLAST|DDESC_CNTL_TXINT;

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueueing desc #%d data %08lx "
		    "len %lu (flags: %08x, len: %08x)\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len,
		    flags, len);
#endif

		desc->ddesc_cntl = htole32(len|flags);
		flags &= ~DDESC_CNTL_TXFIRST;

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i)
			desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	/* Pass first to device */
	sc->sc_txq.t_desc[first].ddesc_status
	    = htole32(DDESC_STATUS_OWNEDBYDEV);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	if ((change & IFF_PROMISC) != 0)
		dwc_gmac_setmulti(sc);
	return 0;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dwc_gmac_setmulti(sc);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
	return error;
}

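/*
 * TX completion: walk the ring from t_next, reclaiming descriptors the
 * device has given back (OWNEDBYDEV cleared), unloading their DMA maps
 * and freeing the transmitted mbufs; clear IFF_OACTIVE once there is
 * room in the ring again.
 */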
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	uint32_t flags;
	int i;

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0;
	    i = TX_NEXT(i), sc->sc_txq.t_queued--) {

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		desc = &sc->sc_txq.t_desc[i];
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		flags = le32toh(desc->ddesc_status);

		if (flags & DDESC_STATUS_OWNEDBYDEV)
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;
		sc->sc_ec.ec_if.if_opackets++;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		sc->sc_ec.ec_if.if_flags &= ~IFF_OACTIVE;
	}
}

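/*
 * RX completion: for each descriptor the device has handed back, try to
 * allocate a fresh mbuf cluster to take its place; on success pass the
 * received packet up the stack, on failure drop it and recycle the old
 * buffer.  Either way the descriptor is re-armed and returned to the
 * device before advancing.
 */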
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	uint32_t status;
	struct mbuf *m, *mnew;
	int i, len, error;

	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		status = le32toh(desc->ddesc_status);
		if (status & DDESC_STATUS_OWNEDBYDEV)
			break;

		if (status & (DDESC_STATUS_RXERROR|DDESC_STATUS_RXTRUNCATED)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    status);
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = __SHIFTOUT(status, DDESC_STATUS_FRMLENMSK);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_flags |= M_HASFCS;

		bpf_mtap(ifp, m);
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		desc->ddesc_cntl = htole32(
		    __SHIFTIN(AWGE_MAX_PACKET,DDESC_CNTL_SIZE1MASK) |
		    DDESC_CNTL_RXCHAIN);
		desc->ddesc_status = htole32(DDESC_STATUS_OWNEDBYDEV);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;
}

/*
 * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

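/*
 * Program the 64-bit multicast hash filter.  For each multicast address
 * the hash index is the upper 6 bits of the bit-reversed, inverted
 * CRC-32 of the address; bit 5 selects HTHIGH/HTLOW and bits 0-4 the bit
 * within that register.  Address ranges (addrlo != addrhi), ALLMULTI and
 * PROMISC fall back to an all-ones hash table instead.
 */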
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt, s;

	s = splnet();

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

	splx(s);

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
	splx(s);
}

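/*
 * Shared interrupt handler, called by the bus front-end.  Handles
 * MII/link-change interrupts by reading the MII status register and
 * polling the PHY state, dispatches DMA status bits to the TX and RX
 * handlers, counts DMA error conditions (and dumps them under
 * DWC_GMAC_DEBUG), then acknowledges the handled DMA status bits.
 * Returns non-zero if the interrupt was ours.
 */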
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	return rv;
}

#ifdef DWC_GMAC_DEBUG
static void
dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
{
	aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
	aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
	aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
	aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
	aprint_normal_dev(sc->sc_dev, "status: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
	aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
	aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
	aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
	aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
	aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
}

static void
dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
	    sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
	aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_txq.t_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
{
	int i;

	aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
	    sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
	aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
		aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
		    "data: %08x next: %08x\n",
		    i, sc->sc_rxq.r_physaddr +
		        i*sizeof(struct dwc_gmac_dev_dmadesc),
		    le32toh(desc->ddesc_status), le32toh(desc->ddesc_cntl),
		    le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
	}
}

static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}

static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	panic("%s", msg);
}

static void
dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
#endif