dwc_gmac.c revision 1.53 1 1.53 aymeric /* $NetBSD: dwc_gmac.c,v 1.53 2018/09/17 20:25:49 aymeric Exp $ */
2 1.18 jmcneill
3 1.1 martin /*-
4 1.1 martin * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5 1.1 martin * All rights reserved.
6 1.1 martin *
7 1.1 martin * This code is derived from software contributed to The NetBSD Foundation
8 1.1 martin * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9 1.1 martin *
10 1.1 martin * Redistribution and use in source and binary forms, with or without
11 1.1 martin * modification, are permitted provided that the following conditions
12 1.1 martin * are met:
13 1.1 martin * 1. Redistributions of source code must retain the above copyright
14 1.1 martin * notice, this list of conditions and the following disclaimer.
15 1.1 martin * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 martin * notice, this list of conditions and the following disclaimer in the
17 1.1 martin * documentation and/or other materials provided with the distribution.
18 1.1 martin *
19 1.1 martin * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 martin * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 martin * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 martin * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 martin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 martin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 martin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 martin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 martin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 martin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 martin * POSSIBILITY OF SUCH DAMAGE.
30 1.1 martin */
31 1.1 martin
32 1.1 martin /*
 * This driver supports the Synopsys DesignWare GMAC core, as found
34 1.1 martin * on Allwinner A20 cores and others.
35 1.1 martin *
36 1.1 martin * Real documentation seems to not be available, the marketing product
37 1.1 martin * documents could be found here:
38 1.1 martin *
39 1.1 martin * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
40 1.1 martin */
41 1.1 martin
42 1.1 martin #include <sys/cdefs.h>
43 1.1 martin
44 1.53 aymeric __KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.53 2018/09/17 20:25:49 aymeric Exp $");
45 1.7 martin
46 1.7 martin /* #define DWC_GMAC_DEBUG 1 */
47 1.1 martin
48 1.38 skrll #ifdef _KERNEL_OPT
49 1.1 martin #include "opt_inet.h"
50 1.38 skrll #include "opt_net_mpsafe.h"
51 1.38 skrll #endif
52 1.1 martin
53 1.1 martin #include <sys/param.h>
54 1.1 martin #include <sys/bus.h>
55 1.1 martin #include <sys/device.h>
56 1.1 martin #include <sys/intr.h>
57 1.1 martin #include <sys/systm.h>
58 1.1 martin #include <sys/sockio.h>
59 1.29 jmcneill #include <sys/cprng.h>
60 1.1 martin
61 1.1 martin #include <net/if.h>
62 1.1 martin #include <net/if_ether.h>
63 1.1 martin #include <net/if_media.h>
64 1.1 martin #include <net/bpf.h>
65 1.1 martin #ifdef INET
66 1.1 martin #include <netinet/if_inarp.h>
67 1.1 martin #endif
68 1.1 martin
69 1.1 martin #include <dev/mii/miivar.h>
70 1.1 martin
71 1.1 martin #include <dev/ic/dwc_gmac_reg.h>
72 1.1 martin #include <dev/ic/dwc_gmac_var.h>
73 1.1 martin
74 1.1 martin static int dwc_gmac_miibus_read_reg(device_t, int, int);
75 1.1 martin static void dwc_gmac_miibus_write_reg(device_t, int, int, int);
76 1.1 martin static void dwc_gmac_miibus_statchg(struct ifnet *);
77 1.1 martin
78 1.1 martin static int dwc_gmac_reset(struct dwc_gmac_softc *sc);
79 1.1 martin static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
80 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN]);
81 1.1 martin static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc);
82 1.1 martin static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc);
83 1.1 martin static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
84 1.1 martin static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
85 1.1 martin static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *);
86 1.1 martin static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
87 1.1 martin static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
88 1.1 martin static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_tx_ring *);
89 1.1 martin static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops);
90 1.1 martin static int dwc_gmac_init(struct ifnet *ifp);
91 1.38 skrll static int dwc_gmac_init_locked(struct ifnet *ifp);
92 1.1 martin static void dwc_gmac_stop(struct ifnet *ifp, int disable);
93 1.38 skrll static void dwc_gmac_stop_locked(struct ifnet *ifp, int disable);
94 1.1 martin static void dwc_gmac_start(struct ifnet *ifp);
95 1.38 skrll static void dwc_gmac_start_locked(struct ifnet *ifp);
96 1.1 martin static int dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0);
97 1.1 martin static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
98 1.8 martin static void dwc_gmac_tx_intr(struct dwc_gmac_softc *sc);
99 1.8 martin static void dwc_gmac_rx_intr(struct dwc_gmac_softc *sc);
100 1.20 jmcneill static void dwc_gmac_setmulti(struct dwc_gmac_softc *sc);
101 1.22 martin static int dwc_gmac_ifflags_cb(struct ethercom *);
102 1.22 martin static uint32_t bitrev32(uint32_t x);
103 1.53 aymeric static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
104 1.53 aymeric static int dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
105 1.53 aymeric static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
106 1.53 aymeric static int dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
107 1.53 aymeric static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
108 1.53 aymeric static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
109 1.53 aymeric static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
110 1.53 aymeric static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
111 1.53 aymeric static int dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
112 1.53 aymeric static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
113 1.53 aymeric static int dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
114 1.53 aymeric static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
115 1.53 aymeric static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
116 1.53 aymeric static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
117 1.53 aymeric static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
118 1.53 aymeric static int dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);
119 1.53 aymeric
120 1.53 aymeric static const struct dwc_gmac_desc_methods desc_methods_standard = {
121 1.53 aymeric .tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
122 1.53 aymeric .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
123 1.53 aymeric .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
124 1.53 aymeric .tx_set_len = dwc_gmac_desc_std_set_len,
125 1.53 aymeric .tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
126 1.53 aymeric .tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
127 1.53 aymeric .rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
128 1.53 aymeric .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
129 1.53 aymeric .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
130 1.53 aymeric .rx_set_len = dwc_gmac_desc_std_set_len,
131 1.53 aymeric .rx_get_len = dwc_gmac_desc_std_get_len,
132 1.53 aymeric .rx_has_error = dwc_gmac_desc_std_rx_has_error
133 1.53 aymeric };
134 1.53 aymeric
135 1.53 aymeric static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
136 1.53 aymeric .tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
137 1.53 aymeric .tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
138 1.53 aymeric .tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
139 1.53 aymeric .tx_set_len = dwc_gmac_desc_enh_set_len,
140 1.53 aymeric .tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
141 1.53 aymeric .tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
142 1.53 aymeric .rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
143 1.53 aymeric .rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
144 1.53 aymeric .rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
145 1.53 aymeric .rx_set_len = dwc_gmac_desc_enh_set_len,
146 1.53 aymeric .rx_get_len = dwc_gmac_desc_enh_get_len,
147 1.53 aymeric .rx_has_error = dwc_gmac_desc_enh_rx_has_error
148 1.53 aymeric };
149 1.53 aymeric
150 1.1 martin
151 1.1 martin #define TX_DESC_OFFSET(N) ((AWGE_RX_RING_COUNT+(N)) \
152 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc))
153 1.8 martin #define TX_NEXT(N) (((N)+1) & (AWGE_TX_RING_COUNT-1))
154 1.1 martin
155 1.1 martin #define RX_DESC_OFFSET(N) ((N)*sizeof(struct dwc_gmac_dev_dmadesc))
156 1.8 martin #define RX_NEXT(N) (((N)+1) & (AWGE_RX_RING_COUNT-1))
157 1.8 martin
158 1.8 martin
159 1.8 martin
160 1.11 martin #define GMAC_DEF_DMA_INT_MASK (GMAC_DMA_INT_TIE|GMAC_DMA_INT_RIE| \
161 1.8 martin GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE| \
162 1.8 martin GMAC_DMA_INT_FBE|GMAC_DMA_INT_UNE)
163 1.8 martin
164 1.8 martin #define GMAC_DMA_INT_ERRORS (GMAC_DMA_INT_AIE|GMAC_DMA_INT_ERE| \
165 1.10 martin GMAC_DMA_INT_FBE| \
166 1.8 martin GMAC_DMA_INT_RWE|GMAC_DMA_INT_RUE| \
167 1.8 martin GMAC_DMA_INT_UNE|GMAC_DMA_INT_OVE| \
168 1.10 martin GMAC_DMA_INT_TJE)
169 1.8 martin
170 1.8 martin #define AWIN_DEF_MAC_INTRMASK \
171 1.8 martin (AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
172 1.53 aymeric AWIN_GMAC_MAC_INT_LINKCHG)
173 1.1 martin
174 1.7 martin #ifdef DWC_GMAC_DEBUG
175 1.7 martin static void dwc_gmac_dump_dma(struct dwc_gmac_softc *sc);
176 1.7 martin static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc);
177 1.11 martin static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc);
178 1.8 martin static void dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg);
179 1.10 martin static void dwc_dump_status(struct dwc_gmac_softc *sc);
180 1.22 martin static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt);
181 1.7 martin #endif
182 1.7 martin
183 1.38 skrll #ifdef NET_MPSAFE
184 1.38 skrll #define DWCGMAC_MPSAFE 1
185 1.38 skrll #endif
186 1.38 skrll
187 1.51 jmcneill int
188 1.5 martin dwc_gmac_attach(struct dwc_gmac_softc *sc, uint32_t mii_clk)
189 1.1 martin {
190 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN];
191 1.1 martin uint32_t maclo, machi;
192 1.1 martin struct mii_data * const mii = &sc->sc_mii;
193 1.1 martin struct ifnet * const ifp = &sc->sc_ec.ec_if;
194 1.5 martin prop_dictionary_t dict;
195 1.41 msaitoh int rv;
196 1.1 martin
197 1.1 martin mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
198 1.3 martin sc->sc_mii_clk = mii_clk & 7;
199 1.1 martin
200 1.5 martin dict = device_properties(sc->sc_dev);
201 1.5 martin prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
202 1.5 martin if (ea != NULL) {
203 1.5 martin /*
204 1.5 martin * If the MAC address is overriden by a device property,
205 1.5 martin * use that.
206 1.5 martin */
207 1.5 martin KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
208 1.5 martin KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
209 1.5 martin memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
210 1.5 martin } else {
211 1.5 martin /*
212 1.5 martin * If we did not get an externaly configure address,
213 1.5 martin * try to read one from the current filter setup,
214 1.5 martin * before resetting the chip.
215 1.5 martin */
216 1.8 martin maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
217 1.8 martin AWIN_GMAC_MAC_ADDR0LO);
218 1.8 martin machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
219 1.8 martin AWIN_GMAC_MAC_ADDR0HI);
220 1.14 jmcneill
221 1.14 jmcneill if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
222 1.29 jmcneill /* fake MAC address */
223 1.29 jmcneill maclo = 0x00f2 | (cprng_strong32() << 16);
224 1.29 jmcneill machi = cprng_strong32();
225 1.14 jmcneill }
226 1.14 jmcneill
227 1.1 martin enaddr[0] = maclo & 0x0ff;
228 1.1 martin enaddr[1] = (maclo >> 8) & 0x0ff;
229 1.1 martin enaddr[2] = (maclo >> 16) & 0x0ff;
230 1.1 martin enaddr[3] = (maclo >> 24) & 0x0ff;
231 1.1 martin enaddr[4] = machi & 0x0ff;
232 1.1 martin enaddr[5] = (machi >> 8) & 0x0ff;
233 1.1 martin }
234 1.1 martin
235 1.1 martin /*
236 1.21 joerg * Init chip and do initial setup
237 1.1 martin */
238 1.1 martin if (dwc_gmac_reset(sc) != 0)
239 1.51 jmcneill return ENXIO; /* not much to cleanup, haven't attached yet */
240 1.5 martin dwc_gmac_write_hwaddr(sc, enaddr);
241 1.52 sevan aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
242 1.1 martin ether_sprintf(enaddr));
243 1.1 martin
244 1.53 aymeric if (bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_HWFEATURES) &
245 1.53 aymeric GMAC_DMA_FEAT_ENHANCED_DESC)
246 1.53 aymeric sc->sc_descm = &desc_methods_enhanced;
247 1.53 aymeric else
248 1.53 aymeric sc->sc_descm = &desc_methods_standard;
249 1.53 aymeric
250 1.1 martin /*
251 1.1 martin * Allocate Tx and Rx rings
252 1.1 martin */
253 1.1 martin if (dwc_gmac_alloc_dma_rings(sc) != 0) {
254 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
255 1.1 martin goto fail;
256 1.1 martin }
257 1.38 skrll
258 1.1 martin if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
259 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
260 1.1 martin goto fail;
261 1.1 martin }
262 1.1 martin
263 1.1 martin if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
264 1.1 martin aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
265 1.1 martin goto fail;
266 1.1 martin }
267 1.1 martin
268 1.38 skrll sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
269 1.38 skrll mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
270 1.38 skrll mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);
271 1.38 skrll
272 1.1 martin /*
273 1.1 martin * Prepare interface data
274 1.1 martin */
275 1.1 martin ifp->if_softc = sc;
276 1.1 martin strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
277 1.1 martin ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
278 1.44 ozaki #ifdef DWCGMAC_MPSAFE
279 1.43 ozaki ifp->if_extflags = IFEF_MPSAFE;
280 1.44 ozaki #endif
281 1.1 martin ifp->if_ioctl = dwc_gmac_ioctl;
282 1.1 martin ifp->if_start = dwc_gmac_start;
283 1.1 martin ifp->if_init = dwc_gmac_init;
284 1.1 martin ifp->if_stop = dwc_gmac_stop;
285 1.1 martin IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
286 1.1 martin IFQ_SET_READY(&ifp->if_snd);
287 1.1 martin
288 1.1 martin /*
289 1.1 martin * Attach MII subdevices
290 1.1 martin */
291 1.2 martin sc->sc_ec.ec_mii = &sc->sc_mii;
292 1.1 martin ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
293 1.1 martin mii->mii_ifp = ifp;
294 1.1 martin mii->mii_readreg = dwc_gmac_miibus_read_reg;
295 1.1 martin mii->mii_writereg = dwc_gmac_miibus_write_reg;
296 1.1 martin mii->mii_statchg = dwc_gmac_miibus_statchg;
297 1.25 jmcneill mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
298 1.25 jmcneill MIIF_DOPAUSE);
299 1.1 martin
300 1.38 skrll if (LIST_EMPTY(&mii->mii_phys)) {
301 1.1 martin aprint_error_dev(sc->sc_dev, "no PHY found!\n");
302 1.1 martin ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
303 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_MANUAL);
304 1.1 martin } else {
305 1.1 martin ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_AUTO);
306 1.1 martin }
307 1.1 martin
308 1.1 martin /*
309 1.33 tnn * We can support 802.1Q VLAN-sized frames.
310 1.33 tnn */
311 1.33 tnn sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
312 1.33 tnn
313 1.33 tnn /*
314 1.1 martin * Ready, attach interface
315 1.1 martin */
316 1.38 skrll /* Attach the interface. */
317 1.41 msaitoh rv = if_initialize(ifp);
318 1.41 msaitoh if (rv != 0)
319 1.41 msaitoh goto fail_2;
320 1.38 skrll sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
321 1.40 ozaki if_deferred_start_init(ifp, NULL);
322 1.1 martin ether_ifattach(ifp, enaddr);
323 1.22 martin ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
324 1.38 skrll if_register(ifp);
325 1.1 martin
326 1.1 martin /*
327 1.1 martin * Enable interrupts
328 1.1 martin */
329 1.38 skrll mutex_enter(sc->sc_lock);
330 1.25 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
331 1.8 martin AWIN_DEF_MAC_INTRMASK);
332 1.8 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
333 1.8 martin GMAC_DEF_DMA_INT_MASK);
334 1.38 skrll mutex_exit(sc->sc_lock);
335 1.1 martin
336 1.51 jmcneill return 0;
337 1.51 jmcneill
338 1.41 msaitoh fail_2:
339 1.41 msaitoh ifmedia_removeall(&mii->mii_media);
340 1.42 jakllsch mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
341 1.41 msaitoh mutex_destroy(&sc->sc_txq.t_mtx);
342 1.41 msaitoh mutex_destroy(&sc->sc_rxq.r_mtx);
343 1.41 msaitoh mutex_obj_free(sc->sc_lock);
344 1.1 martin fail:
345 1.1 martin dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
346 1.1 martin dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
347 1.41 msaitoh dwc_gmac_free_dma_rings(sc);
348 1.41 msaitoh mutex_destroy(&sc->sc_mdio_lock);
349 1.51 jmcneill
350 1.51 jmcneill return ENXIO;
351 1.1 martin }
352 1.1 martin
353 1.1 martin
354 1.1 martin
355 1.1 martin static int
356 1.1 martin dwc_gmac_reset(struct dwc_gmac_softc *sc)
357 1.1 martin {
358 1.1 martin size_t cnt;
359 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
360 1.1 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE) | GMAC_BUSMODE_RESET);
361 1.1 martin for (cnt = 0; cnt < 3000; cnt++) {
362 1.1 martin if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
363 1.1 martin & GMAC_BUSMODE_RESET) == 0)
364 1.1 martin return 0;
365 1.1 martin delay(10);
366 1.1 martin }
367 1.1 martin
368 1.1 martin aprint_error_dev(sc->sc_dev, "reset timed out\n");
369 1.1 martin return EIO;
370 1.1 martin }
371 1.1 martin
372 1.1 martin static void
373 1.1 martin dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
374 1.1 martin uint8_t enaddr[ETHER_ADDR_LEN])
375 1.1 martin {
376 1.49 jmcneill uint32_t hi, lo;
377 1.1 martin
378 1.49 jmcneill hi = enaddr[4] | (enaddr[5] << 8);
379 1.1 martin lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
380 1.1 martin | (enaddr[3] << 24);
381 1.49 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
382 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
383 1.1 martin }
384 1.1 martin
385 1.1 martin static int
386 1.1 martin dwc_gmac_miibus_read_reg(device_t self, int phy, int reg)
387 1.1 martin {
388 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
389 1.6 martin uint16_t mii;
390 1.1 martin size_t cnt;
391 1.1 martin int rv = 0;
392 1.1 martin
393 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
394 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
395 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
396 1.6 martin | GMAC_MII_BUSY;
397 1.1 martin
398 1.1 martin mutex_enter(&sc->sc_mdio_lock);
399 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
400 1.1 martin
401 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
402 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
403 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
404 1.3 martin rv = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
405 1.3 martin AWIN_GMAC_MAC_MIIDATA);
406 1.1 martin break;
407 1.1 martin }
408 1.1 martin delay(10);
409 1.1 martin }
410 1.1 martin
411 1.1 martin mutex_exit(&sc->sc_mdio_lock);
412 1.1 martin
413 1.1 martin return rv;
414 1.1 martin }
415 1.1 martin
416 1.1 martin static void
417 1.1 martin dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, int val)
418 1.1 martin {
419 1.1 martin struct dwc_gmac_softc * const sc = device_private(self);
420 1.6 martin uint16_t mii;
421 1.1 martin size_t cnt;
422 1.1 martin
423 1.6 martin mii = __SHIFTIN(phy,GMAC_MII_PHY_MASK)
424 1.6 martin | __SHIFTIN(reg,GMAC_MII_REG_MASK)
425 1.6 martin | __SHIFTIN(sc->sc_mii_clk,GMAC_MII_CLKMASK)
426 1.6 martin | GMAC_MII_BUSY | GMAC_MII_WRITE;
427 1.1 martin
428 1.1 martin mutex_enter(&sc->sc_mdio_lock);
429 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
430 1.6 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);
431 1.1 martin
432 1.1 martin for (cnt = 0; cnt < 1000; cnt++) {
433 1.3 martin if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
434 1.3 martin AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
435 1.1 martin break;
436 1.1 martin delay(10);
437 1.1 martin }
438 1.38 skrll
439 1.1 martin mutex_exit(&sc->sc_mdio_lock);
440 1.1 martin }
441 1.1 martin
442 1.1 martin static int
443 1.1 martin dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
444 1.1 martin struct dwc_gmac_rx_ring *ring)
445 1.1 martin {
446 1.1 martin struct dwc_gmac_rx_data *data;
447 1.1 martin bus_addr_t physaddr;
448 1.6 martin const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
449 1.1 martin int error, i, next;
450 1.1 martin
451 1.1 martin ring->r_cur = ring->r_next = 0;
452 1.1 martin memset(ring->r_desc, 0, descsize);
453 1.1 martin
454 1.1 martin /*
455 1.1 martin * Pre-allocate Rx buffers and populate Rx ring.
456 1.1 martin */
457 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
458 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
459 1.1 martin
460 1.1 martin data = &sc->sc_rxq.r_data[i];
461 1.1 martin
462 1.1 martin MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
463 1.1 martin if (data->rd_m == NULL) {
464 1.1 martin aprint_error_dev(sc->sc_dev,
465 1.1 martin "could not allocate rx mbuf #%d\n", i);
466 1.1 martin error = ENOMEM;
467 1.1 martin goto fail;
468 1.1 martin }
469 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
470 1.1 martin MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
471 1.1 martin if (error != 0) {
472 1.1 martin aprint_error_dev(sc->sc_dev,
473 1.1 martin "could not create DMA map\n");
474 1.1 martin data->rd_map = NULL;
475 1.1 martin goto fail;
476 1.1 martin }
477 1.1 martin MCLGET(data->rd_m, M_DONTWAIT);
478 1.1 martin if (!(data->rd_m->m_flags & M_EXT)) {
479 1.1 martin aprint_error_dev(sc->sc_dev,
480 1.1 martin "could not allocate mbuf cluster #%d\n", i);
481 1.1 martin error = ENOMEM;
482 1.1 martin goto fail;
483 1.1 martin }
484 1.1 martin
485 1.1 martin error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
486 1.1 martin mtod(data->rd_m, void *), MCLBYTES, NULL,
487 1.1 martin BUS_DMA_READ | BUS_DMA_NOWAIT);
488 1.1 martin if (error != 0) {
489 1.1 martin aprint_error_dev(sc->sc_dev,
490 1.1 martin "could not load rx buf DMA map #%d", i);
491 1.1 martin goto fail;
492 1.1 martin }
493 1.1 martin physaddr = data->rd_map->dm_segs[0].ds_addr;
494 1.1 martin
495 1.1 martin desc = &sc->sc_rxq.r_desc[i];
496 1.1 martin desc->ddesc_data = htole32(physaddr);
497 1.8 martin next = RX_NEXT(i);
498 1.38 skrll desc->ddesc_next = htole32(ring->r_physaddr
499 1.1 martin + next * sizeof(*desc));
500 1.53 aymeric sc->sc_descm->rx_init_flags(desc);
501 1.53 aymeric sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
502 1.53 aymeric sc->sc_descm->rx_set_owned_by_dev(desc);
503 1.1 martin }
504 1.1 martin
505 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
506 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
507 1.27 matt BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
508 1.1 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
509 1.6 martin ring->r_physaddr);
510 1.1 martin
511 1.1 martin return 0;
512 1.1 martin
513 1.1 martin fail:
514 1.1 martin dwc_gmac_free_rx_ring(sc, ring);
515 1.1 martin return error;
516 1.1 martin }
517 1.1 martin
518 1.1 martin static void
519 1.1 martin dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
520 1.1 martin struct dwc_gmac_rx_ring *ring)
521 1.1 martin {
522 1.1 martin struct dwc_gmac_dev_dmadesc *desc;
523 1.1 martin int i;
524 1.1 martin
525 1.38 skrll mutex_enter(&ring->r_mtx);
526 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
527 1.1 martin desc = &sc->sc_rxq.r_desc[i];
528 1.53 aymeric sc->sc_descm->rx_init_flags(desc);
529 1.53 aymeric sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
530 1.53 aymeric sc->sc_descm->rx_set_owned_by_dev(desc);
531 1.1 martin }
532 1.1 martin
533 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
534 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
535 1.27 matt BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
536 1.1 martin
537 1.1 martin ring->r_cur = ring->r_next = 0;
538 1.11 martin /* reset DMA address to start of ring */
539 1.11 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
540 1.11 martin sc->sc_rxq.r_physaddr);
541 1.38 skrll mutex_exit(&ring->r_mtx);
542 1.1 martin }
543 1.1 martin
544 1.1 martin static int
545 1.1 martin dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
546 1.1 martin {
547 1.1 martin const size_t descsize = AWGE_TOTAL_RING_COUNT *
548 1.1 martin sizeof(struct dwc_gmac_dev_dmadesc);
549 1.1 martin int error, nsegs;
550 1.1 martin void *rings;
551 1.1 martin
552 1.1 martin error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
553 1.1 martin BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
554 1.1 martin if (error != 0) {
555 1.1 martin aprint_error_dev(sc->sc_dev,
556 1.1 martin "could not create desc DMA map\n");
557 1.1 martin sc->sc_dma_ring_map = NULL;
558 1.1 martin goto fail;
559 1.1 martin }
560 1.1 martin
561 1.1 martin error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
562 1.1 martin &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
563 1.1 martin if (error != 0) {
564 1.1 martin aprint_error_dev(sc->sc_dev,
565 1.1 martin "could not map DMA memory\n");
566 1.1 martin goto fail;
567 1.1 martin }
568 1.1 martin
569 1.1 martin error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
570 1.1 martin descsize, &rings, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
571 1.1 martin if (error != 0) {
572 1.1 martin aprint_error_dev(sc->sc_dev,
573 1.1 martin "could not allocate DMA memory\n");
574 1.1 martin goto fail;
575 1.1 martin }
576 1.1 martin
577 1.1 martin error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
578 1.1 martin descsize, NULL, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
579 1.1 martin if (error != 0) {
580 1.1 martin aprint_error_dev(sc->sc_dev,
581 1.1 martin "could not load desc DMA map\n");
582 1.1 martin goto fail;
583 1.1 martin }
584 1.1 martin
585 1.1 martin /* give first AWGE_RX_RING_COUNT to the RX side */
586 1.1 martin sc->sc_rxq.r_desc = rings;
587 1.1 martin sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;
588 1.1 martin
589 1.1 martin /* and next rings to the TX side */
590 1.1 martin sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
591 1.38 skrll sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
592 1.1 martin AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);
593 1.1 martin
594 1.1 martin return 0;
595 1.1 martin
596 1.1 martin fail:
597 1.1 martin dwc_gmac_free_dma_rings(sc);
598 1.1 martin return error;
599 1.1 martin }
600 1.1 martin
601 1.1 martin static void
602 1.1 martin dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
603 1.1 martin {
604 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
605 1.1 martin sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
606 1.1 martin bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
607 1.1 martin bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
608 1.1 martin AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
609 1.1 martin bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
610 1.1 martin }
611 1.1 martin
612 1.1 martin static void
613 1.1 martin dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
614 1.1 martin {
615 1.1 martin struct dwc_gmac_rx_data *data;
616 1.1 martin int i;
617 1.1 martin
618 1.1 martin if (ring->r_desc == NULL)
619 1.1 martin return;
620 1.1 martin
621 1.1 martin
622 1.1 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
623 1.1 martin data = &ring->r_data[i];
624 1.1 martin
625 1.1 martin if (data->rd_map != NULL) {
626 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
627 1.1 martin AWGE_RX_RING_COUNT
628 1.1 martin *sizeof(struct dwc_gmac_dev_dmadesc),
629 1.1 martin BUS_DMASYNC_POSTREAD);
630 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->rd_map);
631 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
632 1.1 martin }
633 1.1 martin if (data->rd_m != NULL)
634 1.1 martin m_freem(data->rd_m);
635 1.1 martin }
636 1.1 martin }
637 1.1 martin
638 1.1 martin static int
639 1.1 martin dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
640 1.1 martin struct dwc_gmac_tx_ring *ring)
641 1.1 martin {
642 1.1 martin int i, error = 0;
643 1.1 martin
644 1.1 martin ring->t_queued = 0;
645 1.1 martin ring->t_cur = ring->t_next = 0;
646 1.1 martin
647 1.1 martin memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
648 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
649 1.1 martin TX_DESC_OFFSET(0),
650 1.1 martin AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
651 1.1 martin BUS_DMASYNC_POSTWRITE);
652 1.1 martin
653 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
654 1.1 martin error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
655 1.1 martin AWGE_TX_RING_COUNT, MCLBYTES, 0,
656 1.1 martin BUS_DMA_NOWAIT|BUS_DMA_COHERENT,
657 1.1 martin &ring->t_data[i].td_map);
658 1.1 martin if (error != 0) {
659 1.1 martin aprint_error_dev(sc->sc_dev,
660 1.1 martin "could not create TX DMA map #%d\n", i);
661 1.1 martin ring->t_data[i].td_map = NULL;
662 1.1 martin goto fail;
663 1.1 martin }
664 1.1 martin ring->t_desc[i].ddesc_next = htole32(
665 1.1 martin ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
666 1.8 martin *TX_NEXT(i));
667 1.1 martin }
668 1.1 martin
669 1.1 martin return 0;
670 1.1 martin
671 1.1 martin fail:
672 1.1 martin dwc_gmac_free_tx_ring(sc, ring);
673 1.1 martin return error;
674 1.1 martin }
675 1.1 martin
676 1.1 martin static void
677 1.1 martin dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
678 1.1 martin {
679 1.1 martin /* 'end' is pointing one descriptor beyound the last we want to sync */
680 1.1 martin if (end > start) {
681 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
682 1.1 martin TX_DESC_OFFSET(start),
683 1.1 martin TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
684 1.1 martin ops);
685 1.1 martin return;
686 1.1 martin }
687 1.1 martin /* sync from 'start' to end of ring */
688 1.1 martin bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
689 1.1 martin TX_DESC_OFFSET(start),
690 1.31 jmcneill TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
691 1.1 martin ops);
692 1.47 jmcneill if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
693 1.47 jmcneill /* sync from start of ring to 'end' */
694 1.47 jmcneill bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
695 1.47 jmcneill TX_DESC_OFFSET(0),
696 1.47 jmcneill TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
697 1.47 jmcneill ops);
698 1.47 jmcneill }
699 1.1 martin }
700 1.1 martin
/*
 * Drop all packets still queued on the TX ring and reset it to an
 * empty state; the TX DMA engine is re-pointed at the ring base.
 * Called from dwc_gmac_stop_locked().
 */
static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			/* an mbuf is still loaded: sync, unload and free */
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* flush the whole descriptor ring for reuse by the device */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	/* re-point the TX DMA engine at the start of the ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}
732 1.1 martin
733 1.1 martin static void
734 1.1 martin dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
735 1.1 martin struct dwc_gmac_tx_ring *ring)
736 1.1 martin {
737 1.1 martin int i;
738 1.1 martin
739 1.1 martin /* unload the maps */
740 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
741 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
742 1.1 martin
743 1.1 martin if (data->td_m != NULL) {
744 1.1 martin bus_dmamap_sync(sc->sc_dmat, data->td_active,
745 1.1 martin 0, data->td_map->dm_mapsize,
746 1.1 martin BUS_DMASYNC_POSTWRITE);
747 1.1 martin bus_dmamap_unload(sc->sc_dmat, data->td_active);
748 1.1 martin m_freem(data->td_m);
749 1.1 martin data->td_m = NULL;
750 1.1 martin }
751 1.1 martin }
752 1.1 martin
753 1.1 martin /* and actually free them */
754 1.1 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
755 1.1 martin struct dwc_gmac_tx_data *data = &ring->t_data[i];
756 1.1 martin
757 1.1 martin bus_dmamap_destroy(sc->sc_dmat, data->td_map);
758 1.1 martin }
759 1.1 martin }
760 1.1 martin
/*
 * MII status-change callback: reprogram the MAC configuration and
 * flow control registers to match the link parameters negotiated by
 * the PHY (speed, duplex, pause).
 */
static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100|AWIN_GMAC_MAC_CONF_MIISEL
	    |AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		/* GMII: neither MIISEL nor FES100 gets set */
		break;
	}
	/* attachment-specific hook for the new link speed, if provided */
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	/* build the flow control register value */
	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}
817 1.1 martin
818 1.1 martin static int
819 1.1 martin dwc_gmac_init(struct ifnet *ifp)
820 1.1 martin {
821 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
822 1.38 skrll
823 1.38 skrll mutex_enter(sc->sc_lock);
824 1.38 skrll int ret = dwc_gmac_init_locked(ifp);
825 1.38 skrll mutex_exit(sc->sc_lock);
826 1.38 skrll
827 1.38 skrll return ret;
828 1.38 skrll }
829 1.38 skrll
/*
 * Bring the hardware up: program DMA bus mode, the address and
 * multicast filters and the ring base pointers, then start the RX/TX
 * DMA engines.  Called with sc_lock held; a no-op when already
 * running.  Returns 0.
 */
static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	/* start from a known-stopped state */
	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		/* DBF = disable broadcast frames, so clear it to accept */
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up dma pointer for RX and TX ring
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		/* store-and-forward unless threshold mode was forced */
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);

	/* allow interrupt handler and start path to run again */
	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}
895 1.1 martin
/*
 * if_start callback: acquire the core lock and the TX queue lock (in
 * that order) and hand off to dwc_gmac_start_locked(), unless the
 * interface is in the process of stopping.
 */
static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}
912 1.38 skrll
/*
 * Move packets from the interface send queue onto the TX ring until
 * either queue is exhausted, then sync the touched descriptors and
 * kick the TX DMA engine.  Called with sc_lock and sc_txq.t_mtx held.
 */
static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* peek first: only dequeue once the mbuf is on the ring */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			/* ring full (or map failure), try again later */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			/* every descriptor is now in use */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		/* poke the TX poll-demand register to resume transmission */
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}
952 1.1 martin
953 1.1 martin static void
954 1.1 martin dwc_gmac_stop(struct ifnet *ifp, int disable)
955 1.1 martin {
956 1.1 martin struct dwc_gmac_softc *sc = ifp->if_softc;
957 1.1 martin
958 1.38 skrll mutex_enter(sc->sc_lock);
959 1.38 skrll dwc_gmac_stop_locked(ifp, disable);
960 1.38 skrll mutex_exit(sc->sc_lock);
961 1.38 skrll }
962 1.38 skrll
/*
 * Stop the interface: halt the RX/TX DMA engines, flush the TX FIFO,
 * take the PHY down and drop everything queued on both rings.
 * Called with sc_lock held.  'disable' is currently unused here.
 */
static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	/* tell the interrupt handler and start path to back off */
	sc->sc_stopping = true;

	/* stop both DMA engines ... */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART|GMAC_DMA_OP_RXSTART));
	/* ... then flush the TX FIFO */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
986 1.1 martin
987 1.1 martin /*
988 1.1 martin * Add m0 to the TX ring
989 1.1 martin */
990 1.1 martin static int
991 1.1 martin dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
992 1.1 martin {
993 1.1 martin struct dwc_gmac_dev_dmadesc *desc = NULL;
994 1.1 martin struct dwc_gmac_tx_data *data = NULL;
995 1.1 martin bus_dmamap_t map;
996 1.1 martin int error, i, first;
997 1.1 martin
998 1.8 martin #ifdef DWC_GMAC_DEBUG
999 1.8 martin aprint_normal_dev(sc->sc_dev,
1000 1.8 martin "dwc_gmac_queue: adding mbuf chain %p\n", m0);
1001 1.8 martin #endif
1002 1.8 martin
1003 1.1 martin first = sc->sc_txq.t_cur;
1004 1.1 martin map = sc->sc_txq.t_data[first].td_map;
1005 1.1 martin
1006 1.1 martin error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
1007 1.1 martin BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1008 1.1 martin if (error != 0) {
1009 1.1 martin aprint_error_dev(sc->sc_dev, "could not map mbuf "
1010 1.1 martin "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
1011 1.1 martin return error;
1012 1.1 martin }
1013 1.1 martin
1014 1.32 martin if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
1015 1.1 martin bus_dmamap_unload(sc->sc_dmat, map);
1016 1.1 martin return ENOBUFS;
1017 1.1 martin }
1018 1.1 martin
1019 1.1 martin for (i = 0; i < map->dm_nsegs; i++) {
1020 1.1 martin data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
1021 1.8 martin desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];
1022 1.8 martin
1023 1.8 martin desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);
1024 1.7 martin
1025 1.7 martin #ifdef DWC_GMAC_DEBUG
1026 1.7 martin aprint_normal_dev(sc->sc_dev, "enqueing desc #%d data %08lx "
1027 1.53 aymeric "len %lu\n", sc->sc_txq.t_cur,
1028 1.7 martin (unsigned long)map->dm_segs[i].ds_addr,
1029 1.53 aymeric (unsigned long)map->dm_segs[i].ds_len);
1030 1.7 martin #endif
1031 1.7 martin
1032 1.53 aymeric sc->sc_descm->tx_init_flags(desc);
1033 1.53 aymeric sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);
1034 1.53 aymeric
1035 1.53 aymeric if (i == 0)
1036 1.53 aymeric sc->sc_descm->tx_set_first_frag(desc);
1037 1.1 martin
1038 1.1 martin /*
1039 1.1 martin * Defer passing ownership of the first descriptor
1040 1.23 joerg * until we are done.
1041 1.1 martin */
1042 1.53 aymeric if (i != 0)
1043 1.53 aymeric sc->sc_descm->tx_set_owned_by_dev(desc);
1044 1.8 martin
1045 1.6 martin sc->sc_txq.t_queued++;
1046 1.8 martin sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
1047 1.1 martin }
1048 1.1 martin
1049 1.53 aymeric sc->sc_descm->tx_set_last_frag(desc);
1050 1.1 martin
1051 1.1 martin data->td_m = m0;
1052 1.1 martin data->td_active = map;
1053 1.1 martin
1054 1.1 martin bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1055 1.34 jmcneill BUS_DMASYNC_PREWRITE);
1056 1.1 martin
1057 1.32 martin /* Pass first to device */
1058 1.53 aymeric sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);
1059 1.53 aymeric
1060 1.53 aymeric bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1061 1.53 aymeric BUS_DMASYNC_PREWRITE);
1062 1.32 martin
1063 1.1 martin return 0;
1064 1.1 martin }
1065 1.1 martin
/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode. Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	/* which flags changed since we last looked? */
	int change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	/* changes outside IFF_CANTCHANGE/IFF_DEBUG need a full reset */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		/* reprogram the RX filter for the new PROMISC setting */
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}
1094 1.22 martin
/*
 * if_ioctl callback.  Most work is delegated to ether_ioctl(); when
 * it reports ENETRESET for a multicast change on a running interface
 * we only need to reprogram the hardware filter instead of a reset.
 */
static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	/* in MP-safe mode sc_lock protects the rest, drop spl early */
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}
1134 1.1 martin
/*
 * TX completion: walk the ring from t_next, reclaiming descriptors
 * the device has finished with -- unload the DMA map and free the
 * mbuf of each completed packet.  Stops at the first descriptor
 * still owned by the device.
 */
static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor,
		 * this is just a special notion to just sync
		 * a single tx descriptor (i)
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		/*
		 * Only the last descriptor of a packet carries the mbuf
		 * (see dwc_gmac_queue()); intermediate ones are skipped.
		 */
		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		ifp->if_opackets++;
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		/* release all descriptors of this packet at once */
		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		/* there is room on the ring again, allow transmits */
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}
1193 1.8 martin
/*
 * RX completion: pass finished packets up the stack, exchanging each
 * delivered mbuf for a freshly allocated cluster so every descriptor
 * always owns a buffer.  On any allocation/mapping failure the packet
 * is dropped and the old buffer recycled.
 */
static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		/* still owned by the device: no more finished packets */
		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			ifp->if_ierrors++;
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one
		 * up, if that fails, drop the packet and reuse
		 * the existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			ifp->if_ierrors++;
			goto skip;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
		    mtod(mnew, void*), MCLBYTES, NULL,
		    BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load(sc->sc_dmat, data->rd_map,
			    mtod(data->rd_m, void*), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				/* descriptor would be left bufferless */
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			ifp->if_ierrors++;
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		/* hand the descriptor (old or new buffer) back to the HW */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, AWGE_MAX_PACKET);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}
1306 1.8 martin
/*
 * Reverse the order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
 * (swap halves first, then bytes, nibbles, bit pairs, single bits)
 */
static uint32_t
bitrev32(uint32_t x)
{
	x = (x >> 16) | (x << 16);
	x = ((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8);
	x = ((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4);
	x = ((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2);
	x = ((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1);

	return x;
}
1320 1.22 martin
/*
 * Program the hardware receive filter from the multicast list: a
 * 64-bit hash filter in the normal case, or a catch-all setup for
 * PROMISC, ALLMULTI and address ranges.  Called with sc_lock held.
 */
static void
dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t ffilt, h;
	int mcnt;

	KASSERT(mutex_owned(sc->sc_lock));

	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);

	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
		goto special_filter;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM|AWIN_GMAC_MAC_FFILT_PR);

	/* start with an empty hash table */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);

	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	mcnt = 0;
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/* address ranges cannot be hashed: accept all */
			ffilt |= AWIN_GMAC_MAC_FFILT_PM;
			ifp->if_flags |= IFF_ALLMULTI;
			goto special_filter;
		}

		/* hash index: top 6 bits of bit-reversed inverted CRC32 */
		h = bitrev32(
		    ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
		    ) >> 26;
		hashes[h >> 5] |= (1 << (h & 0x1f));

		mcnt++;
		ETHER_NEXT_MULTI(step, enm);
	}

	if (mcnt)
		ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
	else
		ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    hashes[0]);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    hashes[1]);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	return;

special_filter:
#ifdef DWC_GMAC_DEBUG
	dwc_gmac_dump_ffilt(sc, ffilt);
#endif
	/* no MAC hashes, ALLMULTI or PROMISC */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
	    ffilt);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
	    0xffffffff);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
	    0xffffffff);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
}
1395 1.20 jmcneill
/*
 * Interrupt handler, called by the bus front-ends.  Dispatches MII,
 * RX-complete and TX-complete work, counts error conditions and
 * acknowledges all pending DMA interrupt bits.
 * Returns non-zero when the interrupt was handled.
 */
int
dwc_gmac_intr(struct dwc_gmac_softc *sc)
{
	uint32_t status, dma_status;
	int rv = 0;

	/* the interface is going down, ignore late interrupts */
	if (sc->sc_stopping)
		return 0;

	status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
	if (status & AWIN_GMAC_MII_IRQ) {
		/* MII status read is discarded; appears to ack the IRQ */
		(void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MII_STATUS);
		rv = 1;
		mii_pollstat(&sc->sc_mii);
	}

	dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);

	if (dma_status & (GMAC_DMA_INT_NIE|GMAC_DMA_INT_AIE))
		rv = 1;

	if (dma_status & GMAC_DMA_INT_TIE)
		dwc_gmac_tx_intr(sc);

	if (dma_status & GMAC_DMA_INT_RIE)
		dwc_gmac_rx_intr(sc);

	/*
	 * Check error conditions
	 */
	if (dma_status & GMAC_DMA_INT_ERRORS) {
		sc->sc_ec.ec_if.if_oerrors++;
#ifdef DWC_GMAC_DEBUG
		dwc_dump_and_abort(sc, "interrupt error condition");
#endif
	}

	/* ack interrupt */
	if (dma_status)
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);

	/*
	 * Get more packets
	 */
	if (rv)
		if_schedule_deferred_start(&sc->sc_ec.ec_if);

	return rv;
}
1448 1.7 martin
1449 1.53 aymeric static void
1450 1.53 aymeric dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc) {
1451 1.53 aymeric desc->ddesc_status0 |= DDESC_STATUS_OWNEDBYDEV;
1452 1.53 aymeric }
1453 1.53 aymeric
1454 1.53 aymeric static int
1455 1.53 aymeric dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc) {
1456 1.53 aymeric return !!(desc->ddesc_status0 & DDESC_STATUS_OWNEDBYDEV);
1457 1.53 aymeric }
1458 1.53 aymeric
1459 1.53 aymeric static void
1460 1.53 aymeric dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len) {
1461 1.53 aymeric uint32_t cntl = le32toh(desc->ddesc_cntl1);
1462 1.53 aymeric
1463 1.53 aymeric desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
1464 1.53 aymeric __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
1465 1.53 aymeric }
1466 1.53 aymeric
1467 1.53 aymeric static int
1468 1.53 aymeric dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc) {
1469 1.53 aymeric return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
1470 1.53 aymeric }
1471 1.53 aymeric
1472 1.53 aymeric static void
1473 1.53 aymeric dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc) {
1474 1.53 aymeric desc->ddesc_status0 = 0;
1475 1.53 aymeric desc->ddesc_cntl1 = DDESC_CNTL_TXCHAIN;
1476 1.53 aymeric }
1477 1.53 aymeric
1478 1.53 aymeric static void
1479 1.53 aymeric dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc) {
1480 1.53 aymeric uint32_t cntl = le32toh(desc->ddesc_cntl1);
1481 1.53 aymeric
1482 1.53 aymeric desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
1483 1.53 aymeric }
1484 1.53 aymeric
1485 1.53 aymeric static void
1486 1.53 aymeric dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc) {
1487 1.53 aymeric uint32_t cntl = le32toh(desc->ddesc_cntl1);
1488 1.53 aymeric
1489 1.53 aymeric desc->ddesc_cntl1 = htole32(cntl |
1490 1.53 aymeric DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
1491 1.53 aymeric }
1492 1.53 aymeric
1493 1.53 aymeric static void
1494 1.53 aymeric dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc) {
1495 1.53 aymeric desc->ddesc_status0 = 0;
1496 1.53 aymeric desc->ddesc_cntl1 = DDESC_CNTL_TXCHAIN;
1497 1.53 aymeric }
1498 1.53 aymeric
1499 1.53 aymeric static int
1500 1.53 aymeric dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc) {
1501 1.53 aymeric return !!(le32toh(desc->ddesc_status0) &
1502 1.53 aymeric (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
1503 1.53 aymeric }
1504 1.53 aymeric
1505 1.53 aymeric static void
1506 1.53 aymeric dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len) {
1507 1.53 aymeric uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
1508 1.53 aymeric
1509 1.53 aymeric desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
1510 1.53 aymeric __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
1511 1.53 aymeric }
1512 1.53 aymeric
1513 1.53 aymeric static int
1514 1.53 aymeric dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc) {
1515 1.53 aymeric return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
1516 1.53 aymeric }
1517 1.53 aymeric
1518 1.53 aymeric static void
1519 1.53 aymeric dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc) {
1520 1.53 aymeric desc->ddesc_status0 = DDESC_TDES0_TCH;
1521 1.53 aymeric desc->ddesc_cntl1 = 0;
1522 1.53 aymeric }
1523 1.53 aymeric
1524 1.53 aymeric static void
1525 1.53 aymeric dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc) {
1526 1.53 aymeric uint32_t tdes0 = le32toh(desc->ddesc_status0);
1527 1.53 aymeric
1528 1.53 aymeric desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
1529 1.53 aymeric }
1530 1.53 aymeric
1531 1.53 aymeric static void
1532 1.53 aymeric dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc) {
1533 1.53 aymeric uint32_t tdes0 = le32toh(desc->ddesc_status0);
1534 1.53 aymeric
1535 1.53 aymeric desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
1536 1.53 aymeric }
1537 1.53 aymeric
1538 1.53 aymeric static void
1539 1.53 aymeric dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc) {
1540 1.53 aymeric desc->ddesc_status0 = 0;
1541 1.53 aymeric desc->ddesc_cntl1 = DDESC_RDES1_RCH;
1542 1.53 aymeric }
1543 1.53 aymeric
1544 1.53 aymeric static int
1545 1.53 aymeric dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc) {
1546 1.53 aymeric return !!(le32toh(desc->ddesc_status0) &
1547 1.53 aymeric (DDESC_RDES0_ES | DDESC_RDES0_LE));
1548 1.53 aymeric }
1549 1.53 aymeric
1550 1.7 martin #ifdef DWC_GMAC_DEBUG
1551 1.7 martin static void
1552 1.7 martin dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1553 1.7 martin {
1554 1.7 martin aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1555 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1556 1.7 martin aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1557 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1558 1.7 martin aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1559 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1560 1.7 martin aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1561 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1562 1.7 martin aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1563 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1564 1.7 martin aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1565 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1566 1.7 martin aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1567 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1568 1.7 martin aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1569 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1570 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1571 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1572 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1573 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1574 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1575 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1576 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1577 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1578 1.7 martin }
1579 1.7 martin
1580 1.7 martin static void
1581 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1582 1.7 martin {
1583 1.7 martin int i;
1584 1.7 martin
1585 1.8 martin aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1586 1.8 martin sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1587 1.8 martin aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1588 1.7 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1589 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1590 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1591 1.15 martin "data: %08x next: %08x\n",
1592 1.15 martin i, sc->sc_txq.t_physaddr +
1593 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1594 1.53 aymeric le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1595 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1596 1.7 martin }
1597 1.7 martin }
1598 1.8 martin
1599 1.8 martin static void
1600 1.11 martin dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1601 1.11 martin {
1602 1.11 martin int i;
1603 1.11 martin
1604 1.11 martin aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1605 1.11 martin sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1606 1.11 martin aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1607 1.11 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1608 1.11 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1609 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1610 1.15 martin "data: %08x next: %08x\n",
1611 1.15 martin i, sc->sc_rxq.r_physaddr +
1612 1.15 martin i*sizeof(struct dwc_gmac_dev_dmadesc),
1613 1.53 aymeric le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1614 1.11 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1615 1.11 martin }
1616 1.11 martin }
1617 1.11 martin
/*
 * Print the raw MAC interrupt status register and a symbolically
 * decoded view of the DMA status register (debug builds only).
 */
static void
dwc_dump_status(struct dwc_gmac_softc *sc)
{
	uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_INTR);
	uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_STATUS);
	char buf[200];

	/* print interrupt state */
	/*
	 * snprintb(3) bit-decode format: each "b\xNN""XX\0" entry names
	 * DMA status bit NN with the two-letter mnemonic XX (e.g. NI =
	 * normal interrupt summary, RI = receive interrupt, TI =
	 * transmit interrupt).
	 */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x10""NI\0"
	    "b\x0f""AI\0"
	    "b\x0e""ER\0"
	    "b\x0d""FB\0"
	    "b\x0a""ET\0"
	    "b\x09""RW\0"
	    "b\x08""RS\0"
	    "b\x07""RU\0"
	    "b\x06""RI\0"
	    "b\x05""UN\0"
	    "b\x04""OV\0"
	    "b\x03""TJ\0"
	    "b\x02""TU\0"
	    "b\x01""TS\0"
	    "b\x00""TI\0"
	    "\0", dma_status);
	aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
	    status, buf);
}
1648 1.8 martin
/*
 * Dump all diagnostic state -- interrupt/DMA status, frame filter
 * setup, DMA registers, and both descriptor rings -- then panic with
 * the supplied message.  Does not return.
 */
static void
dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
{
	dwc_dump_status(sc);
	dwc_gmac_dump_ffilt(sc,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
	dwc_gmac_dump_dma(sc);
	dwc_gmac_dump_tx_desc(sc);
	dwc_gmac_dump_rx_desc(sc);

	/* "%s" keeps any '%' characters in msg from being interpreted. */
	panic("%s", msg);
}
1661 1.22 martin
/*
 * Print a symbolically decoded view of the MAC frame filter register
 * value passed in `ffilt' (debug builds only).
 */
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
{
	char buf[200];

	/* print filter setup */
	/*
	 * snprintb(3) bit-decode format: each "b\xNN""XX\0" entry names
	 * filter bit NN (e.g. RA = receive all, PM = pass multicast,
	 * PR = promiscuous).
	 */
	snprintb(buf, sizeof(buf), "\177\20"
	    "b\x1f""RA\0"
	    "b\x0a""HPF\0"
	    "b\x09""SAF\0"
	    "b\x08""SAIF\0"
	    "b\x05""DBF\0"
	    "b\x04""PM\0"
	    "b\x03""DAIF\0"
	    "b\x02""HMC\0"
	    "b\x01""HUC\0"
	    "b\x00""PR\0"
	    "\0", ffilt);
	aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
}
1681 1.7 martin #endif
1682