/* $NetBSD: dwc_gmac.c,v 1.50.2.3 2020/04/13 08:04:21 martin Exp $ */

/*-
 * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry and Martin Husemann.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This driver supports the Synopsys DesignWare GMAC core, as found
 * on Allwinner A20 SoCs and others.
 *
 * Real documentation does not seem to be publicly available; the
 * marketing product documents can be found here:
 *
 * http://www.synopsys.com/dw/ipdir.php?ds=dwc_ether_mac10_100_1000_unive
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(1, "$NetBSD: dwc_gmac.c,v 1.50.2.3 2020/04/13 08:04:21 martin Exp $");

/* #define	DWC_GMAC_DEBUG	1 */

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/cprng.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/if_inarp.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/ic/dwc_gmac_reg.h>
#include <dev/ic/dwc_gmac_var.h>

static int dwc_gmac_miibus_read_reg(device_t, int, int, uint16_t *);
static int dwc_gmac_miibus_write_reg(device_t, int, int, uint16_t);
static void dwc_gmac_miibus_statchg(struct ifnet *);

static int dwc_gmac_reset(struct dwc_gmac_softc *);
static void dwc_gmac_write_hwaddr(struct dwc_gmac_softc *, uint8_t *);
static int dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *);
static void dwc_gmac_free_dma_rings(struct dwc_gmac_softc *);
static int dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static void dwc_gmac_free_rx_ring(struct dwc_gmac_softc *, struct dwc_gmac_rx_ring *);
static int dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_free_tx_ring(struct dwc_gmac_softc *, struct dwc_gmac_tx_ring *);
static void dwc_gmac_txdesc_sync(struct dwc_gmac_softc *, int, int, int);
static int dwc_gmac_init(struct ifnet *);
static int dwc_gmac_init_locked(struct ifnet *);
static void dwc_gmac_stop(struct ifnet *, int);
static void dwc_gmac_stop_locked(struct ifnet *, int);
static void dwc_gmac_start(struct ifnet *);
static void dwc_gmac_start_locked(struct ifnet *);
static int dwc_gmac_queue(struct dwc_gmac_softc *, struct mbuf *);
static int dwc_gmac_ioctl(struct ifnet *, u_long, void *);
static void dwc_gmac_tx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_rx_intr(struct dwc_gmac_softc *);
static void dwc_gmac_setmulti(struct dwc_gmac_softc *);
static int dwc_gmac_ifflags_cb(struct ethercom *);
static uint32_t bitrev32(uint32_t);
static void dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *, int);
static uint32_t dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *);
static void dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *);
static int  dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *);

static const struct dwc_gmac_desc_methods desc_methods_standard = {
	.tx_init_flags = dwc_gmac_desc_std_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_std_set_len,
	.tx_set_first_frag = dwc_gmac_desc_std_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_std_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_std_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_std_set_len,
	.rx_get_len = dwc_gmac_desc_std_get_len,
	.rx_has_error = dwc_gmac_desc_std_rx_has_error
};

static const struct dwc_gmac_desc_methods desc_methods_enhanced = {
	.tx_init_flags = dwc_gmac_desc_enh_tx_init_flags,
	.tx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.tx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.tx_set_len = dwc_gmac_desc_enh_set_len,
	.tx_set_first_frag = dwc_gmac_desc_enh_tx_set_first_frag,
	.tx_set_last_frag = dwc_gmac_desc_enh_tx_set_last_frag,
	.rx_init_flags = dwc_gmac_desc_enh_rx_init_flags,
	.rx_set_owned_by_dev = dwc_gmac_desc_set_owned_by_dev,
	.rx_is_owned_by_dev = dwc_gmac_desc_is_owned_by_dev,
	.rx_set_len = dwc_gmac_desc_enh_set_len,
	.rx_get_len = dwc_gmac_desc_enh_get_len,
	.rx_has_error = dwc_gmac_desc_enh_rx_has_error
};


#define TX_DESC_OFFSET(N)	((AWGE_RX_RING_COUNT+(N)) \
				    *sizeof(struct dwc_gmac_dev_dmadesc))
#define TX_NEXT(N)		(((N)+1) & (AWGE_TX_RING_COUNT-1))

#define RX_DESC_OFFSET(N)	((N)*sizeof(struct dwc_gmac_dev_dmadesc))
#define RX_NEXT(N)		(((N)+1) & (AWGE_RX_RING_COUNT-1))
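
/*
 * Note: the RX descriptors occupy the first AWGE_RX_RING_COUNT slots of
 * the shared descriptor area allocated in dwc_gmac_alloc_dma_rings() and
 * the TX descriptors follow them, which is why TX_DESC_OFFSET() biases by
 * AWGE_RX_RING_COUNT.  TX_NEXT()/RX_NEXT() wrap via "& (count - 1)" and
 * therefore assume both ring sizes are powers of two.
 */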


#define GMAC_DEF_DMA_INT_MASK	(GMAC_DMA_INT_TIE | GMAC_DMA_INT_RIE | \
				GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE | \
				GMAC_DMA_INT_FBE | GMAC_DMA_INT_UNE)

#define GMAC_DMA_INT_ERRORS	(GMAC_DMA_INT_AIE | GMAC_DMA_INT_ERE | \
				GMAC_DMA_INT_FBE | \
				GMAC_DMA_INT_RWE | GMAC_DMA_INT_RUE | \
				GMAC_DMA_INT_UNE | GMAC_DMA_INT_OVE | \
				GMAC_DMA_INT_TJE)

#define AWIN_DEF_MAC_INTRMASK	\
	(AWIN_GMAC_MAC_INT_TSI | AWIN_GMAC_MAC_INT_ANEG | \
	AWIN_GMAC_MAC_INT_LINKCHG)

#ifdef DWC_GMAC_DEBUG
static void dwc_gmac_dump_dma(struct dwc_gmac_softc *);
static void dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *);
static void dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *);
static void dwc_dump_and_abort(struct dwc_gmac_softc *, const char *);
static void dwc_dump_status(struct dwc_gmac_softc *);
static void dwc_gmac_dump_ffilt(struct dwc_gmac_softc *, uint32_t);
#endif

int
dwc_gmac_attach(struct dwc_gmac_softc *sc, int phy_id, uint32_t mii_clk)
{
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint32_t maclo, machi, ver, hwft;
	struct mii_data * const mii = &sc->sc_mii;
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	prop_dictionary_t dict;
	int rv;

	mutex_init(&sc->sc_mdio_lock, MUTEX_DEFAULT, IPL_NET);
	sc->sc_mii_clk = mii_clk & 7;

	dict = device_properties(sc->sc_dev);
	prop_data_t ea = dict ? prop_dictionary_get(dict, "mac-address") : NULL;
	if (ea != NULL) {
		/*
		 * If the MAC address is overridden by a device property,
		 * use that.
		 */
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		/*
		 * If we did not get an externally configured address,
		 * try to read one from the current filter setup,
		 * before resetting the chip.
		 */
		maclo = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0LO);
		machi = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_ADDR0HI);

		if (maclo == 0xffffffff && (machi & 0xffff) == 0xffff) {
			/* fake MAC address */
			maclo = 0x00f2 | (cprng_strong32() << 16);
			machi = cprng_strong32();
		}

		enaddr[0] = maclo & 0x0ff;
		enaddr[1] = (maclo >> 8) & 0x0ff;
		enaddr[2] = (maclo >> 16) & 0x0ff;
		enaddr[3] = (maclo >> 24) & 0x0ff;
		enaddr[4] = machi & 0x0ff;
		enaddr[5] = (machi >> 8) & 0x0ff;
	}

	ver = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_VERSION);
	aprint_normal_dev(sc->sc_dev, "Core version: %08x\n", ver);

	/*
	 * Init chip and do initial setup
	 */
	if (dwc_gmac_reset(sc) != 0)
		return ENXIO;	/* not much to clean up, we haven't attached yet */
	dwc_gmac_write_hwaddr(sc, enaddr);
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	hwft = 0;
	if (ver >= 0x35) {
		hwft = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_HWFEATURES);
		aprint_normal_dev(sc->sc_dev,
		    "HW feature mask: %x\n", hwft);
	}
	if (hwft & GMAC_DMA_FEAT_ENHANCED_DESC) {
		aprint_normal_dev(sc->sc_dev,
		    "Using enhanced descriptor format\n");
		sc->sc_descm = &desc_methods_enhanced;
	} else {
		sc->sc_descm = &desc_methods_standard;
	}

	/*
	 * Allocate Tx and Rx rings
	 */
	if (dwc_gmac_alloc_dma_rings(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate DMA rings\n");
		goto fail;
	}

	if (dwc_gmac_alloc_tx_ring(sc, &sc->sc_txq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Tx ring\n");
		goto fail;
	}

	if (dwc_gmac_alloc_rx_ring(sc, &sc->sc_rxq) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate Rx ring\n");
		goto fail;
	}

	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_txq.t_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_rxq.r_mtx, MUTEX_DEFAULT, IPL_NET);

	/*
	 * Prepare interface data
	 */
	ifp->if_softc = sc;
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#ifdef DWCGMAC_MPSAFE
	ifp->if_extflags = IFEF_MPSAFE;
#endif
	ifp->if_ioctl = dwc_gmac_ioctl;
	ifp->if_start = dwc_gmac_start;
	ifp->if_init = dwc_gmac_init;
	ifp->if_stop = dwc_gmac_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach MII subdevices
	 */
	sc->sc_ec.ec_mii = &sc->sc_mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);
	mii->mii_ifp = ifp;
	mii->mii_readreg = dwc_gmac_miibus_read_reg;
	mii->mii_writereg = dwc_gmac_miibus_write_reg;
	mii->mii_statchg = dwc_gmac_miibus_statchg;
	mii_attach(sc->sc_dev, mii, 0xffffffff, phy_id, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);

	if (LIST_EMPTY(&mii->mii_phys)) {
		aprint_error_dev(sc->sc_dev, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Ready, attach interface
	 */
	rv = if_initialize(ifp);
	if (rv != 0)
		goto fail_2;
	sc->sc_ipq = if_percpuq_create(&sc->sc_ec.ec_if);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ec, dwc_gmac_ifflags_cb);
	if_register(ifp);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Enable interrupts
	 */
	mutex_enter(sc->sc_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTMASK,
	    AWIN_DEF_MAC_INTRMASK);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE,
	    GMAC_DEF_DMA_INT_MASK);
	mutex_exit(sc->sc_lock);

	return 0;

fail_2:
	ifmedia_removeall(&mii->mii_media);
	mii_detach(mii, MII_PHY_ANY, MII_OFFSET_ANY);
	mutex_destroy(&sc->sc_txq.t_mtx);
	mutex_destroy(&sc->sc_rxq.r_mtx);
	mutex_obj_free(sc->sc_lock);
fail:
	dwc_gmac_free_rx_ring(sc, &sc->sc_rxq);
	dwc_gmac_free_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_free_dma_rings(sc);
	mutex_destroy(&sc->sc_mdio_lock);

	return ENXIO;
}


static int
dwc_gmac_reset(struct dwc_gmac_softc *sc)
{
	size_t cnt;
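
	/*
	 * The reset bit self-clears once the core has finished its
	 * internal reset; with 3000 iterations of delay(10) below this
	 * amounts to a timeout of roughly 30 ms before giving up.
	 */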
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
	    | GMAC_BUSMODE_RESET);
	for (cnt = 0; cnt < 3000; cnt++) {
		if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE)
		    & GMAC_BUSMODE_RESET) == 0)
			return 0;
		delay(10);
	}

	aprint_error_dev(sc->sc_dev, "reset timed out\n");
	return EIO;
}

static void
dwc_gmac_write_hwaddr(struct dwc_gmac_softc *sc,
    uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint32_t hi, lo;

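	/*
	 * ADDR0HI holds the last two address bytes (enaddr[4], enaddr[5])
	 * in its low 16 bits and ADDR0LO the first four bytes; the same
	 * layout is assumed when the address is read back in
	 * dwc_gmac_attach().
	 */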
	hi = enaddr[4] | (enaddr[5] << 8);
	lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16)
	    | (enaddr[3] << 24);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0HI, hi);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_ADDR0LO, lo);
}

static int
dwc_gmac_miibus_read_reg(device_t self, int phy, int reg, uint16_t *val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY;

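	/*
	 * MDIO access works the same way for reads and writes: compose the
	 * PHY address, register number and clock divider in MIIADDR with
	 * the BUSY bit set, then poll (up to 1000 * 10 us here) until the
	 * core clears BUSY; for a read the result is then in MIIDATA.
	 */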
	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY)) {
			*val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    AWIN_GMAC_MAC_MIIDATA);
			break;
		}
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_miibus_write_reg(device_t self, int phy, int reg, uint16_t val)
{
	struct dwc_gmac_softc * const sc = device_private(self);
	uint16_t mii;
	size_t cnt;

	mii = __SHIFTIN(phy, GMAC_MII_PHY_MASK)
	    | __SHIFTIN(reg, GMAC_MII_REG_MASK)
	    | __SHIFTIN(sc->sc_mii_clk, GMAC_MII_CLKMASK)
	    | GMAC_MII_BUSY | GMAC_MII_WRITE;

	mutex_enter(&sc->sc_mdio_lock);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIDATA, val);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_MIIADDR, mii);

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_MAC_MIIADDR) & GMAC_MII_BUSY))
			break;
		delay(10);
	}

	mutex_exit(&sc->sc_mdio_lock);

	if (cnt >= 1000)
		return ETIMEDOUT;

	return 0;
}

static int
dwc_gmac_alloc_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	const size_t descsize = AWGE_RX_RING_COUNT * sizeof(*ring->r_desc);
	int error, i, next;

	ring->r_cur = ring->r_next = 0;
	memset(ring->r_desc, 0, descsize);

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		struct dwc_gmac_dev_dmadesc *desc;

		data = &sc->sc_rxq.r_data[i];

		MGETHDR(data->rd_m, M_DONTWAIT, MT_DATA);
		if (data->rd_m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->rd_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			data->rd_map = NULL;
			goto fail;
		}
		MCLGET(data->rd_m, M_DONTWAIT);
		if (!(data->rd_m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate mbuf cluster #%d\n", i);
			error = ENOMEM;
			goto fail;
		}
		data->rd_m->m_len = data->rd_m->m_pkthdr.len
		    = data->rd_m->m_ext.ext_size;
		if (data->rd_m->m_len > AWGE_MAX_PACKET) {
			data->rd_m->m_len = data->rd_m->m_pkthdr.len
			    = AWGE_MAX_PACKET;
		}

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not load rx buf DMA map #%d\n", i);
			goto fail;
		}
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		desc = &sc->sc_rxq.r_desc[i];
		desc->ddesc_data = htole32(physaddr);
		next = RX_NEXT(i);
		desc->ddesc_next = htole32(ring->r_physaddr
		    + next * sizeof(*desc));
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    ring->r_physaddr);

	return 0;

fail:
	dwc_gmac_free_rx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_reset_rx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	int i;

	mutex_enter(&ring->r_mtx);
	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];
		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ring->r_cur = ring->r_next = 0;
	/* reset DMA address to start of ring */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	mutex_exit(&ring->r_mtx);
}

static int
dwc_gmac_alloc_dma_rings(struct dwc_gmac_softc *sc)
{
	const size_t descsize = AWGE_TOTAL_RING_COUNT *
		sizeof(struct dwc_gmac_dev_dmadesc);
	int error, nsegs;
	void *rings;

	error = bus_dmamap_create(sc->sc_dmat, descsize, 1, descsize, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dma_ring_map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		sc->sc_dma_ring_map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, descsize, PAGE_SIZE, 0,
	    &sc->sc_dma_ring_seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dma_ring_seg, nsegs,
	    descsize, &rings, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dma_ring_map, rings,
	    descsize, NULL, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load desc DMA map\n");
		goto fail;
	}

	/* give first AWGE_RX_RING_COUNT to the RX side */
	sc->sc_rxq.r_desc = rings;
	sc->sc_rxq.r_physaddr = sc->sc_dma_ring_map->dm_segs[0].ds_addr;

	/* and next rings to the TX side */
	sc->sc_txq.t_desc = sc->sc_rxq.r_desc + AWGE_RX_RING_COUNT;
	sc->sc_txq.t_physaddr = sc->sc_rxq.r_physaddr +
	    AWGE_RX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc);

	return 0;

fail:
	dwc_gmac_free_dma_rings(sc);
	return error;
}

static void
dwc_gmac_free_dma_rings(struct dwc_gmac_softc *sc)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map, 0,
	    sc->sc_dma_ring_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dma_ring_map);
	bus_dmamem_unmap(sc->sc_dmat, sc->sc_rxq.r_desc,
	    AWGE_TOTAL_RING_COUNT * sizeof(struct dwc_gmac_dev_dmadesc));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dma_ring_seg, 1);
}

static void
dwc_gmac_free_rx_ring(struct dwc_gmac_softc *sc, struct dwc_gmac_rx_ring *ring)
{
	struct dwc_gmac_rx_data *data;
	int i;

	if (ring->r_desc == NULL)
		return;

	for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
		data = &ring->r_data[i];

		if (data->rd_map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
			    AWGE_RX_RING_COUNT
				*sizeof(struct dwc_gmac_dev_dmadesc),
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->rd_map);
			bus_dmamap_destroy(sc->sc_dmat, data->rd_map);
		}
		if (data->rd_m != NULL)
			m_freem(data->rd_m);
	}
}

static int
dwc_gmac_alloc_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i, error = 0;

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;

	memset(ring->t_desc, 0, AWGE_TX_RING_COUNT*sizeof(*ring->t_desc));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AWGE_TX_RING_COUNT, MCLBYTES, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
		    &ring->t_data[i].td_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX DMA map #%d\n", i);
			ring->t_data[i].td_map = NULL;
			goto fail;
		}
		ring->t_desc[i].ddesc_next = htole32(
		    ring->t_physaddr + sizeof(struct dwc_gmac_dev_dmadesc)
		    *TX_NEXT(i));
	}

	return 0;

fail:
	dwc_gmac_free_tx_ring(sc, ring);
	return error;
}

static void
dwc_gmac_txdesc_sync(struct dwc_gmac_softc *sc, int start, int end, int ops)
{
	/* 'end' is pointing one descriptor beyond the last we want to sync */
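	/*
	 * Example: with start == 2 and end == 0 the request has wrapped, so
	 * the first branch below does not apply and we sync from descriptor
	 * 2 up to the end of the ring; the trailing block then syncs from
	 * the start of the ring up to (but not including) 'end' whenever
	 * that range is non-empty.
	 */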
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(start),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(start),
		    ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(start),
	    TX_DESC_OFFSET(AWGE_TX_RING_COUNT)-TX_DESC_OFFSET(start),
	    ops);
	if (TX_DESC_OFFSET(end) - TX_DESC_OFFSET(0) > 0) {
		/* sync from start of ring to 'end' */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    TX_DESC_OFFSET(0),
		    TX_DESC_OFFSET(end)-TX_DESC_OFFSET(0),
		    ops);
	}
}

static void
dwc_gmac_reset_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	mutex_enter(&ring->t_mtx);
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_active->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
	    TX_DESC_OFFSET(0),
	    AWGE_TX_RING_COUNT*sizeof(struct dwc_gmac_dev_dmadesc),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	ring->t_queued = 0;
	ring->t_cur = ring->t_next = 0;
	mutex_exit(&ring->t_mtx);
}

static void
dwc_gmac_free_tx_ring(struct dwc_gmac_softc *sc,
	struct dwc_gmac_tx_ring *ring)
{
	int i;

	/* unload the maps */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		if (data->td_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->td_active,
			    0, data->td_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->td_active);
			m_freem(data->td_m);
			data->td_m = NULL;
		}
	}

	/* and actually free them */
	for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
		struct dwc_gmac_tx_data *data = &ring->t_data[i];

		bus_dmamap_destroy(sc->sc_dmat, data->td_map);
	}
}

static void
dwc_gmac_miibus_statchg(struct ifnet *ifp)
{
	struct dwc_gmac_softc * const sc = ifp->if_softc;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t conf, flow;

	/*
	 * Set MII or GMII interface based on the speed
	 * negotiated by the PHY.
	 */
	conf = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_CONF);
	conf &= ~(AWIN_GMAC_MAC_CONF_FES100 | AWIN_GMAC_MAC_CONF_MIISEL
	    | AWIN_GMAC_MAC_CONF_FULLDPLX);
	conf |= AWIN_GMAC_MAC_CONF_FRAMEBURST
	    | AWIN_GMAC_MAC_CONF_DISABLERXOWN
	    | AWIN_GMAC_MAC_CONF_DISABLEJABBER
	    | AWIN_GMAC_MAC_CONF_ACS
	    | AWIN_GMAC_MAC_CONF_RXENABLE
	    | AWIN_GMAC_MAC_CONF_TXENABLE;
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		conf |= AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_100_TX:
		conf |= AWIN_GMAC_MAC_CONF_FES100 |
		    AWIN_GMAC_MAC_CONF_MIISEL;
		break;
	case IFM_1000_T:
		break;
	}
	if (sc->sc_set_speed)
		sc->sc_set_speed(sc, IFM_SUBTYPE(mii->mii_media_active));

	flow = 0;
	if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) {
		conf |= AWIN_GMAC_MAC_CONF_FULLDPLX;
		flow |= __SHIFTIN(0x200, AWIN_GMAC_MAC_FLOWCTRL_PAUSE);
	}
	if (mii->mii_media_active & IFM_ETH_TXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_TFE;
	}
	if (mii->mii_media_active & IFM_ETH_RXPAUSE) {
		flow |= AWIN_GMAC_MAC_FLOWCTRL_RFE;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_FLOWCTRL, flow);

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "setting MAC conf register: %08x\n", conf);
#endif

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_MAC_CONF, conf);
}

static int
dwc_gmac_init(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	int ret = dwc_gmac_init_locked(ifp);
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_init_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	uint32_t ffilt;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	dwc_gmac_stop_locked(ifp, 0);

	/*
	 * Configure DMA burst/transfer mode and RX/TX priorities.
	 * XXX - the GMAC_BUSMODE_PRIORXTX bits are undocumented.
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE,
	    GMAC_BUSMODE_FIXEDBURST | GMAC_BUSMODE_4PBL |
	    __SHIFTIN(2, GMAC_BUSMODE_RPBL) |
	    __SHIFTIN(2, GMAC_BUSMODE_PBL));

	/*
	 * Set up address filter
	 */
	ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
	if (ifp->if_flags & IFF_PROMISC) {
		ffilt |= AWIN_GMAC_MAC_FFILT_PR;
	} else {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_PR;
	}
	if (ifp->if_flags & IFF_BROADCAST) {
		ffilt &= ~AWIN_GMAC_MAC_FFILT_DBF;
	} else {
		ffilt |= AWIN_GMAC_MAC_FFILT_DBF;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);

	/*
	 * Set up multicast filter
	 */
	dwc_gmac_setmulti(sc);

	/*
	 * Set up DMA pointers for the RX and TX rings
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR,
	    sc->sc_rxq.r_physaddr);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR,
	    sc->sc_txq.t_physaddr);

	/*
	 * Start RX/TX part
	 */
	uint32_t opmode = GMAC_DMA_OP_RXSTART | GMAC_DMA_OP_TXSTART;
	if ((sc->sc_flags & DWC_GMAC_FORCE_THRESH_DMA_MODE) == 0) {
		opmode |= GMAC_DMA_OP_RXSTOREFORWARD | GMAC_DMA_OP_TXSTOREFORWARD;
	}
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE, opmode);
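	/*
	 * Unless a bus frontend has set DWC_GMAC_FORCE_THRESH_DMA_MODE to
	 * force threshold mode, the opmode written above requests
	 * store-and-forward operation in both directions, i.e. the DMA
	 * engine only forwards a frame once it is complete in the FIFO.
	 */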

	sc->sc_stopping = false;

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
dwc_gmac_start(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
#ifdef DWCGMAC_MPSAFE
	KASSERT(if_is_mpsafe(ifp));
#endif

	mutex_enter(sc->sc_lock);
	if (!sc->sc_stopping) {
		mutex_enter(&sc->sc_txq.t_mtx);
		dwc_gmac_start_locked(ifp);
		mutex_exit(&sc->sc_txq.t_mtx);
	}
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_start_locked(struct ifnet *ifp)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int old = sc->sc_txq.t_queued;
	int start = sc->sc_txq.t_cur;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		if (dwc_gmac_queue(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		bpf_mtap(ifp, m0, BPF_D_OUT);
		if (sc->sc_txq.t_queued == AWGE_TX_RING_COUNT) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (sc->sc_txq.t_queued != old) {
		/* packets have been queued, kick it off */
		dwc_gmac_txdesc_sync(sc, start, sc->sc_txq.t_cur,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#ifdef DWC_GMAC_DEBUG
		dwc_dump_status(sc);
#endif
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    AWIN_GMAC_DMA_TXPOLL, ~0U);
	}
}

static void
dwc_gmac_stop(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	mutex_enter(sc->sc_lock);
	dwc_gmac_stop_locked(ifp, disable);
	mutex_exit(sc->sc_lock);
}

static void
dwc_gmac_stop_locked(struct ifnet *ifp, int disable)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;

	sc->sc_stopping = true;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE)
	    & ~(GMAC_DMA_OP_TXSTART | GMAC_DMA_OP_RXSTART));
	bus_space_write_4(sc->sc_bst, sc->sc_bsh,
	    AWIN_GMAC_DMA_OPMODE,
	    bus_space_read_4(sc->sc_bst, sc->sc_bsh,
	        AWIN_GMAC_DMA_OPMODE) | GMAC_DMA_OP_FLUSHTX);

	mii_down(&sc->sc_mii);
	dwc_gmac_reset_tx_ring(sc, &sc->sc_txq);
	dwc_gmac_reset_rx_ring(sc, &sc->sc_rxq);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Add m0 to the TX ring
 */
static int
dwc_gmac_queue(struct dwc_gmac_softc *sc, struct mbuf *m0)
{
	struct dwc_gmac_dev_dmadesc *desc = NULL;
	struct dwc_gmac_tx_data *data = NULL;
	bus_dmamap_t map;
	int error, i, first;

#ifdef DWC_GMAC_DEBUG
	aprint_normal_dev(sc->sc_dev,
	    "dwc_gmac_queue: adding mbuf chain %p\n", m0);
#endif

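	/*
	 * Overall strategy: DMA-map the mbuf chain, fill one descriptor per
	 * segment and mark the first/last fragments, but hand ownership of
	 * the first descriptor to the device only at the very end, so the
	 * DMA engine never sees a partially built chain.
	 */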
	first = sc->sc_txq.t_cur;
	map = sc->sc_txq.t_data[first].td_map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0,
	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf "
		    "(len: %d, error %d)\n", m0->m_pkthdr.len, error);
		return error;
	}

	if (sc->sc_txq.t_queued + map->dm_nsegs > AWGE_TX_RING_COUNT) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->sc_txq.t_data[sc->sc_txq.t_cur];
		desc = &sc->sc_txq.t_desc[sc->sc_txq.t_cur];

		desc->ddesc_data = htole32(map->dm_segs[i].ds_addr);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev, "enqueuing desc #%d data %08lx "
		    "len %lu\n", sc->sc_txq.t_cur,
		    (unsigned long)map->dm_segs[i].ds_addr,
		    (unsigned long)map->dm_segs[i].ds_len);
#endif

		sc->sc_descm->tx_init_flags(desc);
		sc->sc_descm->tx_set_len(desc, map->dm_segs[i].ds_len);

		if (i == 0)
			sc->sc_descm->tx_set_first_frag(desc);

		/*
		 * Defer passing ownership of the first descriptor
		 * until we are done.
		 */
		if (i != 0)
			sc->sc_descm->tx_set_owned_by_dev(desc);

		sc->sc_txq.t_queued++;
		sc->sc_txq.t_cur = TX_NEXT(sc->sc_txq.t_cur);
	}

	sc->sc_descm->tx_set_last_frag(desc);

	data->td_m = m0;
	data->td_active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Pass first to device */
	sc->sc_descm->tx_set_owned_by_dev(&sc->sc_txq.t_desc[first]);

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

/*
 * If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
dwc_gmac_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int ret = 0;

	mutex_enter(sc->sc_lock);
	u_short change = ifp->if_flags ^ sc->sc_if_flags;
	sc->sc_if_flags = ifp->if_flags;

	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
		ret = ENETRESET;
		goto out;
	}
	if ((change & IFF_PROMISC) != 0) {
		dwc_gmac_setmulti(sc);
	}
out:
	mutex_exit(sc->sc_lock);

	return ret;
}

static int
dwc_gmac_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dwc_gmac_softc *sc = ifp->if_softc;
	int error = 0;

	int s = splnet();
	error = ether_ioctl(ifp, cmd, data);

#ifdef DWCGMAC_MPSAFE
	splx(s);
#endif

	if (error == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mutex_enter(sc->sc_lock);
			dwc_gmac_setmulti(sc);
			mutex_exit(sc->sc_lock);
		}
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP)
		dwc_gmac_start(ifp);
	sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;

#ifndef DWCGMAC_MPSAFE
	splx(s);
#endif

	return error;
}

static void
dwc_gmac_tx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_tx_data *data;
	struct dwc_gmac_dev_dmadesc *desc;
	int i, nsegs;

	mutex_enter(&sc->sc_txq.t_mtx);

	for (i = sc->sc_txq.t_next; sc->sc_txq.t_queued > 0; i = TX_NEXT(i)) {
#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: checking desc #%d (t_queued: %d)\n",
		    i, sc->sc_txq.t_queued);
#endif

		/*
		 * i+1 does not need to be a valid descriptor; passing it
		 * as 'end' simply makes the sync cover only the single
		 * tx descriptor (i).
		 */
		dwc_gmac_txdesc_sync(sc, i, i+1,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = &sc->sc_txq.t_desc[i];
		if (sc->sc_descm->tx_is_owned_by_dev(desc))
			break;

		data = &sc->sc_txq.t_data[i];
		if (data->td_m == NULL)
			continue;

		if_statinc(ifp, if_opackets);
		nsegs = data->td_active->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, data->td_active, 0,
		    data->td_active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->td_active);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "dwc_gmac_tx_intr: done with packet at desc #%d, "
		    "freeing mbuf %p\n", i, data->td_m);
#endif

		m_freem(data->td_m);
		data->td_m = NULL;

		sc->sc_txq.t_queued -= nsegs;
	}

	sc->sc_txq.t_next = i;

	if (sc->sc_txq.t_queued < AWGE_TX_RING_COUNT) {
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	mutex_exit(&sc->sc_txq.t_mtx);
}

static void
dwc_gmac_rx_intr(struct dwc_gmac_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct dwc_gmac_dev_dmadesc *desc;
	struct dwc_gmac_rx_data *data;
	bus_addr_t physaddr;
	struct mbuf *m, *mnew;
	int i, len, error;

	mutex_enter(&sc->sc_rxq.r_mtx);
	for (i = sc->sc_rxq.r_cur; ; i = RX_NEXT(i)) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		desc = &sc->sc_rxq.r_desc[i];
		data = &sc->sc_rxq.r_data[i];

		if (sc->sc_descm->rx_is_owned_by_dev(desc))
			break;

		if (sc->sc_descm->rx_has_error(desc)) {
#ifdef DWC_GMAC_DEBUG
			aprint_normal_dev(sc->sc_dev,
			    "RX error: descriptor status %08x, skipping\n",
			    le32toh(desc->ddesc_status0));
#endif
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		len = sc->sc_descm->rx_get_len(desc);

#ifdef DWC_GMAC_DEBUG
		aprint_normal_dev(sc->sc_dev,
		    "rx int: device is done with descriptor #%d, len: %d\n",
		    i, len);
#endif

		/*
		 * Try to get a new mbuf before passing this one up;
		 * if that fails, drop the packet and reuse the
		 * existing one.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		MCLGET(mnew, M_DONTWAIT);
		if ((mnew->m_flags & M_EXT) == 0) {
			m_freem(mnew);
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		mnew->m_len = mnew->m_pkthdr.len = mnew->m_ext.ext_size;
		if (mnew->m_len > AWGE_MAX_PACKET) {
			mnew->m_len = mnew->m_pkthdr.len = AWGE_MAX_PACKET;
		}

		/* unload old DMA map */
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, data->rd_map);

		/* and reload with new mbuf */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
		    mnew, BUS_DMA_READ | BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(mnew);
			/* try to reload old mbuf */
			error = bus_dmamap_load_mbuf(sc->sc_dmat, data->rd_map,
			    data->rd_m, BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				panic("%s: could not load old rx mbuf",
				    device_xname(sc->sc_dev));
			}
			if_statinc(ifp, if_ierrors);
			goto skip;
		}
		physaddr = data->rd_map->dm_segs[0].ds_addr;

		/*
		 * New mbuf loaded, update RX ring and continue
		 */
		m = data->rd_m;
		data->rd_m = mnew;
		desc->ddesc_data = htole32(physaddr);

		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);
		m->m_flags |= M_HASFCS;

		if_percpuq_enqueue(sc->sc_ipq, m);

skip:
		bus_dmamap_sync(sc->sc_dmat, data->rd_map, 0,
		    data->rd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		sc->sc_descm->rx_init_flags(desc);
		sc->sc_descm->rx_set_len(desc, data->rd_m->m_len);
		sc->sc_descm->rx_set_owned_by_dev(desc);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dma_ring_map,
		    RX_DESC_OFFSET(i), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* update RX pointer */
	sc->sc_rxq.r_cur = i;

	mutex_exit(&sc->sc_rxq.r_mtx);
}
1335 1.8 martin
1336 1.22 martin /*
1337 1.24 skrll * Reverse order of bits - http://aggregate.org/MAGIC/#Bit%20Reversal
1338 1.22 martin */
1339 1.22 martin static uint32_t
1340 1.22 martin bitrev32(uint32_t x)
1341 1.22 martin {
1342 1.22 martin x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
1343 1.22 martin x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
1344 1.22 martin x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
1345 1.22 martin x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
1346 1.22 martin
1347 1.22 martin return (x >> 16) | (x << 16);
1348 1.22 martin }
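/*
 * For example, bitrev32(0x00000001) == 0x80000000 and
 * bitrev32(0x0000000f) == 0xf0000000.  dwc_gmac_setmulti() below
 * uses this to put the little-endian CRC into the bit order the
 * hash filter expects.
 */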
1349 1.22 martin
1350 1.20 jmcneill static void
1351 1.20 jmcneill dwc_gmac_setmulti(struct dwc_gmac_softc *sc)
1352 1.20 jmcneill {
1353 1.20 jmcneill struct ifnet * const ifp = &sc->sc_ec.ec_if;
1354 1.20 jmcneill struct ether_multi *enm;
1355 1.20 jmcneill struct ether_multistep step;
1356 1.50.2.1 christos struct ethercom *ec = &sc->sc_ec;
1357 1.20 jmcneill uint32_t hashes[2] = { 0, 0 };
1358 1.22 martin uint32_t ffilt, h;
1359 1.38 skrll int mcnt;
1360 1.22 martin
1361 1.38 skrll KASSERT(mutex_owned(sc->sc_lock));
1362 1.20 jmcneill
1363 1.20 jmcneill ffilt = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT);
1364 1.38 skrll
1365 1.20 jmcneill if (ifp->if_flags & IFF_PROMISC) {
1366 1.22 martin ffilt |= AWIN_GMAC_MAC_FFILT_PR;
1367 1.22 martin goto special_filter;
1368 1.20 jmcneill }
1369 1.20 jmcneill
1370 1.50.2.1 christos ffilt &= ~(AWIN_GMAC_MAC_FFILT_PM | AWIN_GMAC_MAC_FFILT_PR);
1371 1.20 jmcneill
1372 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW, 0);
1373 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH, 0);
1374 1.20 jmcneill
1375 1.50.2.1 christos ETHER_LOCK(ec);
1376 1.50.2.1 christos ec->ec_flags &= ~ETHER_F_ALLMULTI;
1377 1.50.2.1 christos ETHER_FIRST_MULTI(step, ec, enm);
1378 1.20 jmcneill mcnt = 0;
1379 1.20 jmcneill while (enm != NULL) {
1380 1.20 jmcneill if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1381 1.22 martin ETHER_ADDR_LEN) != 0) {
1382 1.22 martin ffilt |= AWIN_GMAC_MAC_FFILT_PM;
1383 1.50.2.1 christos ec->ec_flags |= ETHER_F_ALLMULTI;
1384 1.50.2.1 christos ETHER_UNLOCK(ec);
1385 1.22 martin goto special_filter;
1386 1.22 martin }
1387 1.20 jmcneill
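/*
 * Hash bucket: complement and bit-reverse the little-endian
 * CRC-32 of the address and keep the upper 6 bits.  Bit 5 of
 * the result selects HTHIGH vs. HTLOW, bits 0-4 select the bit
 * within that register.
 */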
1388 1.22 martin h = bitrev32(
1389 1.22 martin ~ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN)
1390 1.22 martin ) >> 26;
1391 1.20 jmcneill hashes[h >> 5] |= (1 << (h & 0x1f));
1392 1.20 jmcneill
1393 1.20 jmcneill mcnt++;
1394 1.20 jmcneill ETHER_NEXT_MULTI(step, enm);
1395 1.20 jmcneill }
1396 1.50.2.1 christos ETHER_UNLOCK(ec);
1397 1.20 jmcneill
1398 1.20 jmcneill if (mcnt)
1399 1.20 jmcneill ffilt |= AWIN_GMAC_MAC_FFILT_HMC;
1400 1.20 jmcneill else
1401 1.20 jmcneill ffilt &= ~AWIN_GMAC_MAC_FFILT_HMC;
1402 1.20 jmcneill
1403 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT, ffilt);
1404 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1405 1.20 jmcneill hashes[0]);
1406 1.20 jmcneill bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1407 1.20 jmcneill hashes[1]);
1408 1.50.2.1 christos sc->sc_if_flags = ifp->if_flags;
1409 1.22 martin
1410 1.22 martin #ifdef DWC_GMAC_DEBUG
1411 1.22 martin dwc_gmac_dump_ffilt(sc, ffilt);
1412 1.22 martin #endif
1413 1.22 martin return;
1414 1.22 martin
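/*
 * Catch-all setup, used for IFF_PROMISC and for the ALLMULTI
 * fallback above: write the frame filter with PR and/or PM set
 * and accept every hash bucket.
 */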
1415 1.22 martin special_filter:
1416 1.22 martin #ifdef DWC_GMAC_DEBUG
1417 1.22 martin dwc_gmac_dump_ffilt(sc, ffilt);
1418 1.22 martin #endif
1419 1.22 martin /* no MAC hashes, ALLMULTI or PROMISC */
1420 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT,
1421 1.22 martin ffilt);
1422 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTLOW,
1423 1.22 martin 0xffffffff);
1424 1.22 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_HTHIGH,
1425 1.22 martin 0xffffffff);
1426 1.22 martin sc->sc_if_flags = sc->sc_ec.ec_if.if_flags;
1427 1.20 jmcneill }
1428 1.20 jmcneill
1429 1.1 martin int
1430 1.1 martin dwc_gmac_intr(struct dwc_gmac_softc *sc)
1431 1.1 martin {
1432 1.1 martin uint32_t status, dma_status;
1433 1.8 martin int rv = 0;
1434 1.1 martin
1435 1.38 skrll if (sc->sc_stopping)
1436 1.38 skrll return 0;
1437 1.38 skrll
1438 1.1 martin status = bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_INTR);
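/*
 * Link state change: the read of the (R)GMII status register also
 * acknowledges the MAC's link interrupt; let mii(4) pick up the
 * new state.
 */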
1439 1.2 martin if (status & AWIN_GMAC_MII_IRQ) {
1440 1.1 martin (void)bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1441 1.1 martin AWIN_GMAC_MII_STATUS);
1442 1.8 martin rv = 1;
1443 1.2 martin mii_pollstat(&sc->sc_mii);
1444 1.2 martin }
1445 1.1 martin
1446 1.1 martin dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1447 1.1 martin AWIN_GMAC_DMA_STATUS);
1448 1.1 martin
1449 1.50.2.1 christos if (dma_status & (GMAC_DMA_INT_NIE | GMAC_DMA_INT_AIE))
1450 1.8 martin rv = 1;
1451 1.1 martin
1452 1.8 martin if (dma_status & GMAC_DMA_INT_TIE)
1453 1.8 martin dwc_gmac_tx_intr(sc);
1454 1.1 martin
1455 1.8 martin if (dma_status & GMAC_DMA_INT_RIE)
1456 1.8 martin dwc_gmac_rx_intr(sc);
1457 1.8 martin
1458 1.8 martin /*
1459 1.8 martin * Check error conditions
1460 1.8 martin */
1461 1.8 martin if (dma_status & GMAC_DMA_INT_ERRORS) {
1462 1.50.2.2 martin if_statinc(&sc->sc_ec.ec_if, if_oerrors);
1463 1.8 martin #ifdef DWC_GMAC_DEBUG
1464 1.8 martin dwc_dump_and_abort(sc, "interrupt error condition");
1465 1.8 martin #endif
1466 1.8 martin }
1467 1.8 martin
1468 1.50.2.3 martin rnd_add_uint32(&sc->rnd_source, dma_status);
1469 1.50.2.3 martin
1470 1.8 martin /* ack interrupt; the DMA status bits are write-one-to-clear */
1471 1.8 martin if (dma_status)
1472 1.8 martin bus_space_write_4(sc->sc_bst, sc->sc_bsh,
1473 1.8 martin AWIN_GMAC_DMA_STATUS, dma_status & GMAC_DMA_INT_MASK);
1474 1.8 martin
1475 1.28 martin /*
1476 1.28 martin  * Get more packets: TX completions may have freed descriptors.
1477 1.28 martin  */
1478 1.28 martin if (rv)
1479 1.40 ozaki if_schedule_deferred_start(&sc->sc_ec.ec_if);
1480 1.28 martin
1481 1.8 martin return rv;
1482 1.1 martin }
1483 1.7 martin
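/*
 * Descriptor accessors, used through sc->sc_descm.  One set covers
 * the standard descriptor layout and one the enhanced/alternate
 * layout; which set is used is chosen when the core is attached,
 * depending on the descriptor format the particular GMAC
 * configuration implements.
 */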
1484 1.50.2.1 christos static void
1485 1.50.2.1 christos dwc_gmac_desc_set_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1486 1.50.2.1 christos {
1487 1.50.2.1 christos
1488 1.50.2.1 christos desc->ddesc_status0 |= htole32(DDESC_STATUS_OWNEDBYDEV);
1489 1.50.2.1 christos }
1490 1.50.2.1 christos
1491 1.50.2.1 christos static int
1492 1.50.2.1 christos dwc_gmac_desc_is_owned_by_dev(struct dwc_gmac_dev_dmadesc *desc)
1493 1.50.2.1 christos {
1494 1.50.2.1 christos
1495 1.50.2.1 christos return !!(le32toh(desc->ddesc_status0) & DDESC_STATUS_OWNEDBYDEV);
1496 1.50.2.1 christos }
1497 1.50.2.1 christos
1498 1.50.2.1 christos static void
1499 1.50.2.1 christos dwc_gmac_desc_std_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1500 1.50.2.1 christos {
1501 1.50.2.1 christos uint32_t cntl = le32toh(desc->ddesc_cntl1);
1502 1.50.2.1 christos
1503 1.50.2.1 christos desc->ddesc_cntl1 = htole32((cntl & ~DDESC_CNTL_SIZE1MASK) |
1504 1.50.2.1 christos __SHIFTIN(len, DDESC_CNTL_SIZE1MASK));
1505 1.50.2.1 christos }
1506 1.50.2.1 christos
1507 1.50.2.1 christos static uint32_t
1508 1.50.2.1 christos dwc_gmac_desc_std_get_len(struct dwc_gmac_dev_dmadesc *desc)
1509 1.50.2.1 christos {
1510 1.50.2.1 christos
1511 1.50.2.1 christos return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_STATUS_FRMLENMSK);
1512 1.50.2.1 christos }
1513 1.50.2.1 christos
1514 1.50.2.1 christos static void
1515 1.50.2.1 christos dwc_gmac_desc_std_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1516 1.50.2.1 christos {
1517 1.50.2.1 christos
1518 1.50.2.1 christos desc->ddesc_status0 = 0;
1519 1.50.2.1 christos desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1520 1.50.2.1 christos }
1521 1.50.2.1 christos
1522 1.50.2.1 christos static void
1523 1.50.2.1 christos dwc_gmac_desc_std_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1524 1.50.2.1 christos {
1525 1.50.2.1 christos uint32_t cntl = le32toh(desc->ddesc_cntl1);
1526 1.50.2.1 christos
1527 1.50.2.1 christos desc->ddesc_cntl1 = htole32(cntl | DDESC_CNTL_TXFIRST);
1528 1.50.2.1 christos }
1529 1.50.2.1 christos
1530 1.50.2.1 christos static void
1531 1.50.2.1 christos dwc_gmac_desc_std_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1532 1.50.2.1 christos {
1533 1.50.2.1 christos uint32_t cntl = le32toh(desc->ddesc_cntl1);
1534 1.50.2.1 christos
1535 1.50.2.1 christos desc->ddesc_cntl1 = htole32(cntl |
1536 1.50.2.1 christos DDESC_CNTL_TXLAST | DDESC_CNTL_TXINT);
1537 1.50.2.1 christos }
1538 1.50.2.1 christos
1539 1.50.2.1 christos static void
1540 1.50.2.1 christos dwc_gmac_desc_std_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1541 1.50.2.1 christos {
1542 1.50.2.1 christos
1543 1.50.2.1 christos desc->ddesc_status0 = 0;
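/*
 * The chained-descriptor bit presumably sits at the same position
 * in RDES1 as in TDES1, hence the reuse of the TX #define below.
 */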
1544 1.50.2.1 christos desc->ddesc_cntl1 = htole32(DDESC_CNTL_TXCHAIN);
1545 1.50.2.1 christos }
1546 1.50.2.1 christos
1547 1.50.2.1 christos static int
1548 1.50.2.1 christos dwc_gmac_desc_std_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
1548 1.50.2.1 christos {
1548 1.50.2.1 christos
1549 1.50.2.1 christos return !!(le32toh(desc->ddesc_status0) &
1550 1.50.2.1 christos (DDESC_STATUS_RXERROR | DDESC_STATUS_RXTRUNCATED));
1551 1.50.2.1 christos }
1552 1.50.2.1 christos
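/*
 * Enhanced/alternate descriptor layout: the control bits (TCH, FS,
 * LS, IC) and OWN live in ddesc_status0 (TDES0/RDES0) and
 * ddesc_cntl1 only carries buffer sizes, whereas the standard
 * layout above keeps the TX control bits in the cntl word.
 */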
1553 1.50.2.1 christos static void
1554 1.50.2.1 christos dwc_gmac_desc_enh_set_len(struct dwc_gmac_dev_dmadesc *desc, int len)
1555 1.50.2.1 christos {
1556 1.50.2.1 christos uint32_t tdes1 = le32toh(desc->ddesc_cntl1);
1557 1.50.2.1 christos
1558 1.50.2.1 christos desc->ddesc_cntl1 = htole32((tdes1 & ~DDESC_DES1_SIZE1MASK) |
1559 1.50.2.1 christos __SHIFTIN(len, DDESC_DES1_SIZE1MASK));
1560 1.50.2.1 christos }
1561 1.50.2.1 christos
1562 1.50.2.1 christos static uint32_t
1563 1.50.2.1 christos dwc_gmac_desc_enh_get_len(struct dwc_gmac_dev_dmadesc *desc)
1564 1.50.2.1 christos {
1565 1.50.2.1 christos
1566 1.50.2.1 christos return __SHIFTOUT(le32toh(desc->ddesc_status0), DDESC_RDES0_FL);
1567 1.50.2.1 christos }
1568 1.50.2.1 christos
1569 1.50.2.1 christos static void
1570 1.50.2.1 christos dwc_gmac_desc_enh_tx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1571 1.50.2.1 christos {
1572 1.50.2.1 christos
1573 1.50.2.1 christos desc->ddesc_status0 = htole32(DDESC_TDES0_TCH);
1574 1.50.2.1 christos desc->ddesc_cntl1 = 0;
1575 1.50.2.1 christos }
1576 1.50.2.1 christos
1577 1.50.2.1 christos static void
1578 1.50.2.1 christos dwc_gmac_desc_enh_tx_set_first_frag(struct dwc_gmac_dev_dmadesc *desc)
1579 1.50.2.1 christos {
1580 1.50.2.1 christos uint32_t tdes0 = le32toh(desc->ddesc_status0);
1581 1.50.2.1 christos
1582 1.50.2.1 christos desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_FS);
1583 1.50.2.1 christos }
1584 1.50.2.1 christos
1585 1.50.2.1 christos static void
1586 1.50.2.1 christos dwc_gmac_desc_enh_tx_set_last_frag(struct dwc_gmac_dev_dmadesc *desc)
1587 1.50.2.1 christos {
1588 1.50.2.1 christos uint32_t tdes0 = le32toh(desc->ddesc_status0);
1589 1.50.2.1 christos
1590 1.50.2.1 christos desc->ddesc_status0 = htole32(tdes0 | DDESC_TDES0_LS | DDESC_TDES0_IC);
1591 1.50.2.1 christos }
1592 1.50.2.1 christos
1593 1.50.2.1 christos static void
1594 1.50.2.1 christos dwc_gmac_desc_enh_rx_init_flags(struct dwc_gmac_dev_dmadesc *desc)
1595 1.50.2.1 christos {
1596 1.50.2.1 christos
1597 1.50.2.1 christos desc->ddesc_status0 = 0;
1598 1.50.2.1 christos desc->ddesc_cntl1 = htole32(DDESC_RDES1_RCH);
1599 1.50.2.1 christos }
1600 1.50.2.1 christos
1601 1.50.2.1 christos static int
1602 1.50.2.1 christos dwc_gmac_desc_enh_rx_has_error(struct dwc_gmac_dev_dmadesc *desc)
1603 1.50.2.1 christos {
1604 1.50.2.1 christos
1605 1.50.2.1 christos return !!(le32toh(desc->ddesc_status0) &
1606 1.50.2.1 christos (DDESC_RDES0_ES | DDESC_RDES0_LE));
1607 1.50.2.1 christos }
1608 1.50.2.1 christos
1609 1.7 martin #ifdef DWC_GMAC_DEBUG
1610 1.7 martin static void
1611 1.7 martin dwc_gmac_dump_dma(struct dwc_gmac_softc *sc)
1612 1.7 martin {
1613 1.7 martin aprint_normal_dev(sc->sc_dev, "busmode: %08x\n",
1614 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_BUSMODE));
1615 1.7 martin aprint_normal_dev(sc->sc_dev, "tx poll: %08x\n",
1616 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TXPOLL));
1617 1.7 martin aprint_normal_dev(sc->sc_dev, "rx poll: %08x\n",
1618 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RXPOLL));
1619 1.7 martin aprint_normal_dev(sc->sc_dev, "rx descriptors: %08x\n",
1620 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_RX_ADDR));
1621 1.7 martin aprint_normal_dev(sc->sc_dev, "tx descriptors: %08x\n",
1622 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_TX_ADDR));
1623 1.7 martin aprint_normal_dev(sc->sc_dev, "status: %08x\n",
1624 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_STATUS));
1625 1.7 martin aprint_normal_dev(sc->sc_dev, "op mode: %08x\n",
1626 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_OPMODE));
1627 1.7 martin aprint_normal_dev(sc->sc_dev, "int enable: %08x\n",
1628 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_INTENABLE));
1629 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx: %08x\n",
1630 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_DESC));
1631 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx: %08x\n",
1632 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_DESC));
1633 1.7 martin aprint_normal_dev(sc->sc_dev, "cur tx buffer: %08x\n",
1634 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_TX_BUFADDR));
1635 1.7 martin aprint_normal_dev(sc->sc_dev, "cur rx buffer: %08x\n",
1636 1.7 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_DMA_CUR_RX_BUFADDR));
1637 1.7 martin }
1638 1.7 martin
1639 1.7 martin static void
1640 1.7 martin dwc_gmac_dump_tx_desc(struct dwc_gmac_softc *sc)
1641 1.7 martin {
1642 1.7 martin int i;
1643 1.7 martin
1644 1.8 martin aprint_normal_dev(sc->sc_dev, "TX queue: cur=%d, next=%d, queued=%d\n",
1645 1.8 martin sc->sc_txq.t_cur, sc->sc_txq.t_next, sc->sc_txq.t_queued);
1646 1.8 martin aprint_normal_dev(sc->sc_dev, "TX DMA descriptors:\n");
1647 1.7 martin for (i = 0; i < AWGE_TX_RING_COUNT; i++) {
1648 1.7 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_txq.t_desc[i];
1649 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1650 1.15 martin "data: %08x next: %08x\n",
1651 1.15 martin i, sc->sc_txq.t_physaddr +
1652 1.15 martin i * sizeof(struct dwc_gmac_dev_dmadesc),
1653 1.50.2.1 christos le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1654 1.7 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1655 1.7 martin }
1656 1.7 martin }
1657 1.8 martin
1658 1.8 martin static void
1659 1.11 martin dwc_gmac_dump_rx_desc(struct dwc_gmac_softc *sc)
1660 1.11 martin {
1661 1.11 martin int i;
1662 1.11 martin
1663 1.11 martin aprint_normal_dev(sc->sc_dev, "RX queue: cur=%d, next=%d\n",
1664 1.11 martin sc->sc_rxq.r_cur, sc->sc_rxq.r_next);
1665 1.11 martin aprint_normal_dev(sc->sc_dev, "RX DMA descriptors:\n");
1666 1.11 martin for (i = 0; i < AWGE_RX_RING_COUNT; i++) {
1667 1.11 martin struct dwc_gmac_dev_dmadesc *desc = &sc->sc_rxq.r_desc[i];
1668 1.15 martin aprint_normal("#%d (%08lx): status: %08x cntl: %08x "
1669 1.15 martin "data: %08x next: %08x\n",
1670 1.15 martin i, sc->sc_rxq.r_physaddr +
1671 1.15 martin i * sizeof(struct dwc_gmac_dev_dmadesc),
1672 1.50.2.1 christos le32toh(desc->ddesc_status0), le32toh(desc->ddesc_cntl1),
1673 1.11 martin le32toh(desc->ddesc_data), le32toh(desc->ddesc_next));
1674 1.11 martin }
1675 1.11 martin }
1676 1.11 martin
1677 1.11 martin static void
1678 1.10 martin dwc_dump_status(struct dwc_gmac_softc *sc)
1679 1.8 martin {
1680 1.8 martin uint32_t status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1681 1.8 martin AWIN_GMAC_MAC_INTR);
1682 1.8 martin uint32_t dma_status = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
1683 1.8 martin AWIN_GMAC_DMA_STATUS);
1684 1.8 martin char buf[200];
1685 1.8 martin
1686 1.8 martin /* print interrupt state */
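/*
 * snprintb(3) new-style bit format: the leading '\177' selects the
 * new syntax, the next byte is the output base (\20 == 16), and
 * each "b\x<bit><name>\0" entry names a single bit position.
 */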
1687 1.8 martin snprintb(buf, sizeof(buf), "\177\20"
1688 1.10 martin "b\x10""NI\0"
1689 1.10 martin "b\x0f""AI\0"
1690 1.10 martin "b\x0e""ER\0"
1691 1.10 martin "b\x0d""FB\0"
1692 1.10 martin "b\x0a""ET\0"
1693 1.10 martin "b\x09""RW\0"
1694 1.10 martin "b\x08""RS\0"
1695 1.10 martin "b\x07""RU\0"
1696 1.10 martin "b\x06""RI\0"
1697 1.10 martin "b\x05""UN\0"
1698 1.10 martin "b\x04""OV\0"
1699 1.10 martin "b\x03""TJ\0"
1700 1.10 martin "b\x02""TU\0"
1701 1.10 martin "b\x01""TS\0"
1702 1.10 martin "b\x00""TI\0"
1703 1.8 martin "\0", dma_status);
1704 1.10 martin aprint_normal_dev(sc->sc_dev, "INTR status: %08x, DMA status: %s\n",
1705 1.8 martin status, buf);
1706 1.10 martin }
1707 1.8 martin
1708 1.10 martin static void
1709 1.10 martin dwc_dump_and_abort(struct dwc_gmac_softc *sc, const char *msg)
1710 1.10 martin {
1711 1.10 martin dwc_dump_status(sc);
1712 1.22 martin dwc_gmac_dump_ffilt(sc,
1713 1.22 martin bus_space_read_4(sc->sc_bst, sc->sc_bsh, AWIN_GMAC_MAC_FFILT));
1714 1.8 martin dwc_gmac_dump_dma(sc);
1715 1.8 martin dwc_gmac_dump_tx_desc(sc);
1716 1.11 martin dwc_gmac_dump_rx_desc(sc);
1717 1.8 martin
1718 1.21 joerg panic("%s", msg);
1719 1.8 martin }
1720 1.22 martin
1721 1.22 martin static void
1721 1.22 martin dwc_gmac_dump_ffilt(struct dwc_gmac_softc *sc, uint32_t ffilt)
1722 1.22 martin {
1723 1.22 martin char buf[200];
1724 1.22 martin
1725 1.22 martin /* print filter setup */
1726 1.22 martin snprintb(buf, sizeof(buf), "\177\20"
1727 1.22 martin "b\x1f""RA\0"
1728 1.22 martin "b\x0a""HPF\0"
1729 1.22 martin "b\x09""SAF\0"
1730 1.22 martin "b\x08""SAIF\0"
1731 1.22 martin "b\x05""DBF\0"
1732 1.22 martin "b\x04""PM\0"
1733 1.22 martin "b\x03""DAIF\0"
1734 1.22 martin "b\x02""HMC\0"
1735 1.22 martin "b\x01""HUC\0"
1736 1.22 martin "b\x00""PR\0"
1737 1.22 martin "\0", ffilt);
1738 1.22 martin aprint_normal_dev(sc->sc_dev, "FFILT: %s\n", buf);
1739 1.22 martin }
1740 1.7 martin #endif
1741