1 1.35 joerg /* $NetBSD: if_gfe.c,v 1.35 2010/04/05 07:20:24 joerg Exp $ */
2 1.1 matt
3 1.1 matt /*
4 1.1 matt * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * Redistribution and use in source and binary forms, with or without
8 1.1 matt * modification, are permitted provided that the following conditions
9 1.1 matt * are met:
10 1.1 matt * 1. Redistributions of source code must retain the above copyright
11 1.1 matt * notice, this list of conditions and the following disclaimer.
12 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 matt * notice, this list of conditions and the following disclaimer in the
14 1.1 matt * documentation and/or other materials provided with the distribution.
15 1.1 matt * 3. All advertising materials mentioning features or use of this software
16 1.1 matt * must display the following acknowledgement:
17 1.1 matt * This product includes software developed for the NetBSD Project by
18 1.1 matt * Allegro Networks, Inc., and Wasabi Systems, Inc.
19 1.1 matt * 4. The name of Allegro Networks, Inc. may not be used to endorse
20 1.1 matt * or promote products derived from this software without specific prior
21 1.1 matt * written permission.
22 1.1 matt * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23 1.1 matt * or promote products derived from this software without specific prior
24 1.1 matt * written permission.
25 1.1 matt *
26 1.1 matt * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27 1.1 matt * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28 1.1 matt * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29 1.1 matt * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 1.1 matt * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
38 1.1 matt */
39 1.1 matt
40 1.1 matt /*
41 1.1 matt * if_gfe.c -- GT ethernet MAC driver
42 1.1 matt */
43 1.12 lukem
44 1.12 lukem #include <sys/cdefs.h>
45 1.35 joerg __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.35 2010/04/05 07:20:24 joerg Exp $");
46 1.1 matt
47 1.1 matt #include "opt_inet.h"
48 1.1 matt
49 1.1 matt #include <sys/param.h>
50 1.1 matt #include <sys/types.h>
51 1.1 matt #include <sys/inttypes.h>
52 1.1 matt #include <sys/queue.h>
53 1.1 matt
54 1.7 thorpej #include <uvm/uvm_extern.h>
55 1.7 thorpej
56 1.1 matt #include <sys/callout.h>
57 1.1 matt #include <sys/device.h>
58 1.1 matt #include <sys/errno.h>
59 1.1 matt #include <sys/ioctl.h>
60 1.1 matt #include <sys/mbuf.h>
61 1.1 matt #include <sys/socket.h>
62 1.1 matt
63 1.26 ad #include <sys/bus.h>
64 1.1 matt
65 1.1 matt #include <net/if.h>
66 1.1 matt #include <net/if_dl.h>
67 1.1 matt #include <net/if_ether.h>
68 1.1 matt #include <net/if_media.h>
69 1.1 matt
70 1.1 matt #ifdef INET
71 1.1 matt #include <netinet/in.h>
72 1.1 matt #include <netinet/if_inarp.h>
73 1.1 matt #endif
74 1.1 matt #include <net/bpf.h>
75 1.1 matt
76 1.1 matt #include <dev/mii/miivar.h>
77 1.1 matt
78 1.1 matt #include <dev/marvell/gtintrreg.h>
79 1.1 matt #include <dev/marvell/gtethreg.h>
80 1.1 matt
81 1.1 matt #include <dev/marvell/gtvar.h>
82 1.1 matt #include <dev/marvell/if_gfevar.h>
83 1.1 matt
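/*
 * Per-MAC register accessors: sc_memh is the bus_space subregion of the
 * GT core's register window that gfe_attach() maps for this MAC instance.
 */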
84 1.1 matt #define GE_READ(sc, reg) \
85 1.3 matt bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
86 1.1 matt #define GE_WRITE(sc, reg, v) \
87 1.3 matt bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))
88 1.1 matt
89 1.1 matt #define GE_DEBUG
90 1.1 matt #if 0
91 1.1 matt #define GE_NOHASH
92 1.1 matt #define GE_NORX
93 1.1 matt #endif
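/*
 * Compile-time knobs: GE_DEBUG enables the GE_DPRINTF tracing below
 * (still gated at run time by IFF_DEBUG); GE_NOHASH and GE_NORX would
 * compile out the address-filter hash and the receive path respectively.
 */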
94 1.1 matt
95 1.1 matt #ifdef GE_DEBUG
96 1.1 matt #define GE_DPRINTF(sc, a) do \
97 1.1 matt if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
98 1.1 matt printf a; \
99 1.1 matt while (0)
100 1.1 matt #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func))
101 1.1 matt #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]"))
102 1.1 matt #else
103 1.1 matt #define GE_DPRINTF(sc, a) do { } while (0)
104 1.1 matt #define GE_FUNC_ENTER(sc, func) do { } while (0)
105 1.1 matt #define GE_FUNC_EXIT(sc, str) do { } while (0)
106 1.1 matt #endif
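/*
 * gfe_whack() drives the interface state machine: START and STOP bring
 * the interface up and down, RESTART is used by the watchdog to recover
 * from a transmit timeout, and CHANGE reprograms a running interface
 * after flag, media, or multicast changes.
 */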
107 1.1 matt enum gfe_whack_op {
108 1.1 matt GE_WHACK_START, GE_WHACK_RESTART,
109 1.1 matt GE_WHACK_CHANGE, GE_WHACK_STOP
110 1.1 matt };
111 1.1 matt
112 1.1 matt enum gfe_hash_op {
113 1.1 matt GE_HASH_ADD, GE_HASH_REMOVE,
114 1.1 matt };
115 1.1 matt
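/*
 * Byte-order helpers for descriptor fields: this configuration treats the
 * GT's DMA descriptors as big-endian, so convert between CPU and
 * big-endian order here (the disabled branch would use little-endian).
 */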
116 1.2 matt #if 1
117 1.2 matt #define htogt32(a) htobe32(a)
118 1.2 matt #define gt32toh(a) be32toh(a)
119 1.2 matt #else
120 1.2 matt #define htogt32(a) htole32(a)
121 1.2 matt #define gt32toh(a) le32toh(a)
122 1.2 matt #endif
123 1.2 matt
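/*
 * Sync helpers for a single rx/tx descriptor: POSTSYNC before the CPU
 * examines a descriptor the SDMA engine may have written, PRESYNC after
 * the CPU has updated it and is about to hand it back to the hardware.
 */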
124 1.6 matt #define GE_RXDSYNC(sc, rxq, n, ops) \
125 1.6 matt bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
126 1.6 matt (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
127 1.6 matt (ops))
128 1.6 matt #define GE_RXDPRESYNC(sc, rxq, n) \
129 1.6 matt GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
130 1.6 matt #define GE_RXDPOSTSYNC(sc, rxq, n) \
131 1.6 matt GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
132 1.6 matt
133 1.6 matt #define GE_TXDSYNC(sc, txq, n, ops) \
134 1.6 matt bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
135 1.6 matt (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
136 1.6 matt (ops))
137 1.6 matt #define GE_TXDPRESYNC(sc, txq, n) \
138 1.6 matt GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
139 1.6 matt #define GE_TXDPOSTSYNC(sc, txq, n) \
140 1.6 matt GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
141 1.6 matt
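/*
 * STATIC expands to nothing so the functions below, while logically
 * private, keep external linkage (e.g. for debugging).
 */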
142 1.1 matt #define STATIC
143 1.1 matt
144 1.33 cegger STATIC int gfe_match (device_t, cfdata_t, void *);
145 1.33 cegger STATIC void gfe_attach (device_t, device_t, void *);
146 1.1 matt
147 1.2 matt STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int,
148 1.2 matt size_t, int);
149 1.1 matt STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *);
150 1.1 matt
151 1.21 christos STATIC int gfe_ifioctl (struct ifnet *, u_long, void *);
152 1.1 matt STATIC void gfe_ifstart (struct ifnet *);
153 1.1 matt STATIC void gfe_ifwatchdog (struct ifnet *);
154 1.1 matt
155 1.33 cegger STATIC int gfe_mii_read (device_t, int, int);
156 1.33 cegger STATIC void gfe_mii_write (device_t, int, int, int);
157 1.33 cegger STATIC void gfe_mii_statchg (device_t);
158 1.1 matt
159 1.1 matt STATIC void gfe_tick(void *arg);
160 1.1 matt
161 1.1 matt STATIC void gfe_tx_restart(void *);
162 1.1 matt STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio);
163 1.1 matt STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t);
164 1.1 matt STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int);
165 1.15 matt STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio);
166 1.1 matt STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio);
167 1.1 matt STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op);
168 1.1 matt
169 1.1 matt STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio);
170 1.1 matt STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio);
171 1.1 matt STATIC int gfe_rx_prime(struct gfe_softc *);
172 1.1 matt STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t);
173 1.1 matt STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio);
174 1.15 matt STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio);
175 1.1 matt STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op);
176 1.1 matt
177 1.1 matt STATIC int gfe_intr(void *);
178 1.1 matt
179 1.1 matt STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op);
180 1.1 matt
181 1.6 matt STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]);
182 1.1 matt STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op,
183 1.6 matt enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]);
184 1.1 matt STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *,
185 1.1 matt u_long);
186 1.1 matt STATIC int gfe_hash_fill(struct gfe_softc *);
187 1.1 matt STATIC int gfe_hash_alloc(struct gfe_softc *);
188 1.1 matt
189 1.1 matt /* Linkup to the rest of the kernel */
190 1.1 matt CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
191 1.1 matt gfe_match, gfe_attach, NULL, NULL);
192 1.1 matt
193 1.2 matt extern struct cfdriver gfe_cd;
194 1.2 matt
195 1.1 matt int
196 1.33 cegger gfe_match(device_t parent, cfdata_t cf, void *aux)
197 1.1 matt {
198 1.1 matt struct gt_softc *gt = (struct gt_softc *) parent;
199 1.1 matt struct gt_attach_args *ga = aux;
200 1.1 matt uint8_t enaddr[6];
201 1.1 matt
202 1.2 matt if (!GT_ETHEROK(gt, ga, &gfe_cd))
203 1.1 matt return 0;
204 1.1 matt
205 1.1 matt if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0)
206 1.1 matt return 0;
207 1.1 matt
208 1.1 matt if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 &&
209 1.1 matt enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0)
210 1.1 matt return 0;
211 1.1 matt
212 1.1 matt return 1;
213 1.16 perry }
214 1.1 matt
215 1.1 matt /*
216 1.1 matt * Attach this instance, and then all the sub-devices
217 1.1 matt */
218 1.1 matt void
219 1.33 cegger gfe_attach(device_t parent, device_t self, void *aux)
220 1.1 matt {
221 1.5 matt struct gt_attach_args * const ga = aux;
222 1.20 thorpej struct gt_softc * const gt = device_private(parent);
223 1.20 thorpej struct gfe_softc * const sc = device_private(self);
224 1.5 matt struct ifnet * const ifp = &sc->sc_ec.ec_if;
225 1.1 matt uint32_t data;
226 1.1 matt uint8_t enaddr[6];
227 1.1 matt int phyaddr;
228 1.1 matt uint32_t sdcr;
229 1.15 matt int error;
230 1.1 matt
231 1.2 matt GT_ETHERFOUND(gt, ga);
232 1.2 matt
233 1.2 matt sc->sc_gt_memt = ga->ga_memt;
234 1.2 matt sc->sc_gt_memh = ga->ga_memh;
235 1.1 matt sc->sc_dmat = ga->ga_dmat;
236 1.1 matt sc->sc_macno = ga->ga_unit;
237 1.3 matt
238 1.3 matt if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
239 1.3 matt ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
240 1.3 matt aprint_error(": failed to map registers\n");
241 1.3 matt }
242 1.1 matt
243 1.23 ad callout_init(&sc->sc_co, 0);
244 1.1 matt
245 1.2 matt data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
246 1.1 matt phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);
247 1.1 matt
248 1.1 matt gtget_macaddr(gt, sc->sc_macno, enaddr);
249 1.1 matt
250 1.1 matt sc->sc_pcr = GE_READ(sc, EPCR);
251 1.1 matt sc->sc_pcxr = GE_READ(sc, EPCXR);
252 1.1 matt sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;
253 1.1 matt
254 1.2 matt aprint_normal(": address %s", ether_sprintf(enaddr));
255 1.1 matt
256 1.1 matt #if defined(DEBUG)
257 1.2 matt aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
258 1.1 matt #endif
259 1.1 matt
260 1.1 matt sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
261 1.19 thorpej if (device_cfdata(&sc->sc_dev)->cf_flags & 1) {
262 1.2 matt aprint_normal(", phy %d (rmii)", phyaddr);
263 1.2 matt sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
264 1.2 matt } else {
265 1.2 matt aprint_normal(", phy %d (mii)", phyaddr);
266 1.2 matt sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
267 1.2 matt }
268 1.19 thorpej if (device_cfdata(&sc->sc_dev)->cf_flags & 2)
269 1.15 matt sc->sc_flags |= GE_NOFREE;
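	/* Set the maximum frame length field (bits 14-15 of PCXR) to 1536 bytes. */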
270 1.1 matt sc->sc_pcxr &= ~(3 << 14);
271 1.1 matt sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);
272 1.1 matt
273 1.1 matt if (sc->sc_pcr & ETH_EPCR_EN) {
274 1.1 matt int tries = 1000;
275 1.1 matt /*
276 1.1 matt 	 * Abort transmitter and receiver and wait for them to quiesce
277 1.1 matt */
278 1.1 matt GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
279 1.1 matt do {
280 1.1 matt delay(100);
281 1.1 matt } while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
282 1.1 matt }
283 1.1 matt
284 1.8 scw sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);
285 1.1 matt
286 1.1 matt #if defined(DEBUG)
287 1.2 matt aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
288 1.1 matt #endif
289 1.1 matt
290 1.1 matt /*
291 1.1 matt 	 * Now turn off the GT.  If it didn't quiesce, too ***ing bad.
292 1.1 matt */
293 1.1 matt GE_WRITE(sc, EPCR, sc->sc_pcr);
294 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
295 1.1 matt sdcr = GE_READ(sc, ESDCR);
296 1.1 matt ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
297 1.1 matt sdcr |= ETH_ESDCR_RIFB;
298 1.1 matt GE_WRITE(sc, ESDCR, sdcr);
299 1.1 matt sc->sc_max_frame_length = 1536;
300 1.1 matt
301 1.2 matt aprint_normal("\n");
302 1.5 matt sc->sc_mii.mii_ifp = ifp;
303 1.1 matt sc->sc_mii.mii_readreg = gfe_mii_read;
304 1.1 matt sc->sc_mii.mii_writereg = gfe_mii_write;
305 1.1 matt sc->sc_mii.mii_statchg = gfe_mii_statchg;
306 1.1 matt
307 1.27 dyoung sc->sc_ec.ec_mii = &sc->sc_mii;
308 1.27 dyoung ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
309 1.27 dyoung ether_mediastatus);
310 1.1 matt
311 1.1 matt mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
312 1.1 matt MII_OFFSET_ANY, MIIF_NOISOLATE);
313 1.1 matt if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
314 1.1 matt ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
315 1.1 matt ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
316 1.1 matt } else {
317 1.1 matt ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
318 1.1 matt }
319 1.1 matt
320 1.29 cegger strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
321 1.1 matt ifp->if_softc = sc;
322 1.2 matt /* ifp->if_mowner = &sc->sc_mowner; */
323 1.1 matt ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
324 1.1 matt #if 0
325 1.1 matt ifp->if_flags |= IFF_DEBUG;
326 1.1 matt #endif
327 1.1 matt ifp->if_ioctl = gfe_ifioctl;
328 1.1 matt ifp->if_start = gfe_ifstart;
329 1.1 matt ifp->if_watchdog = gfe_ifwatchdog;
330 1.1 matt
331 1.15 matt if (sc->sc_flags & GE_NOFREE) {
332 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
333 1.15 matt if (!error)
334 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
335 1.15 matt if (!error)
336 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
337 1.15 matt if (!error)
338 1.15 matt error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
339 1.15 matt if (!error)
340 1.15 matt error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
341 1.15 matt if (!error)
342 1.15 matt error = gfe_hash_alloc(sc);
343 1.15 matt if (error)
344 1.15 matt aprint_error(
345 1.15 matt "%s: failed to allocate resources: %d\n",
346 1.15 matt ifp->if_xname, error);
347 1.15 matt }
348 1.15 matt
349 1.1 matt if_attach(ifp);
350 1.1 matt ether_ifattach(ifp, enaddr);
351 1.35 joerg bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header));
352 1.1 matt #if NRND > 0
353 1.29 cegger rnd_attach_source(&sc->sc_rnd_source, device_xname(self), RND_TYPE_NET, 0);
354 1.1 matt #endif
355 1.1 matt intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
356 1.1 matt gfe_intr, sc);
357 1.1 matt }
358 1.1 matt
359 1.1 matt int
360 1.1 matt gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs,
361 1.2 matt size_t size, int flags)
362 1.1 matt {
363 1.1 matt int error = 0;
364 1.1 matt GE_FUNC_ENTER(sc, "gfe_dmamem_alloc");
365 1.15 matt
366 1.15 matt KASSERT(gdm->gdm_kva == NULL);
367 1.1 matt gdm->gdm_size = size;
368 1.1 matt gdm->gdm_maxsegs = maxsegs;
369 1.1 matt
370 1.7 thorpej error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE,
371 1.1 matt gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
372 1.1 matt BUS_DMA_NOWAIT);
373 1.1 matt if (error)
374 1.1 matt goto fail;
375 1.1 matt
376 1.1 matt error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
377 1.2 matt gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT);
378 1.1 matt if (error)
379 1.1 matt goto fail;
380 1.1 matt
381 1.1 matt error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
382 1.1 matt gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
383 1.1 matt if (error)
384 1.1 matt goto fail;
385 1.1 matt
386 1.1 matt error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
387 1.1 matt gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
388 1.2 matt if (error)
389 1.2 matt goto fail;
390 1.1 matt
391 1.2 matt /* invalidate from cache */
392 1.2 matt bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size,
393 1.2 matt BUS_DMASYNC_PREREAD);
394 1.1 matt fail:
395 1.1 matt if (error) {
396 1.1 matt gfe_dmamem_free(sc, gdm);
397 1.1 matt GE_DPRINTF(sc, (":err=%d", error));
398 1.1 matt }
399 1.2 matt GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
400 1.2 matt gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs,
401 1.2 matt gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len));
402 1.1 matt GE_FUNC_EXIT(sc, "");
403 1.1 matt return error;
404 1.1 matt }
405 1.1 matt
406 1.1 matt void
407 1.1 matt gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
408 1.1 matt {
409 1.1 matt GE_FUNC_ENTER(sc, "gfe_dmamem_free");
410 1.1 matt if (gdm->gdm_map)
411 1.1 matt bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
412 1.1 matt if (gdm->gdm_kva)
413 1.1 matt bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
414 1.1 matt if (gdm->gdm_nsegs > 0)
415 1.1 matt bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
416 1.1 matt gdm->gdm_map = NULL;
417 1.1 matt gdm->gdm_kva = NULL;
418 1.1 matt gdm->gdm_nsegs = 0;
419 1.1 matt GE_FUNC_EXIT(sc, "");
420 1.1 matt }
421 1.1 matt
422 1.1 matt int
423 1.21 christos gfe_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
424 1.1 matt {
425 1.1 matt struct gfe_softc * const sc = ifp->if_softc;
426 1.1 matt struct ifreq *ifr = (struct ifreq *) data;
427 1.1 matt struct ifaddr *ifa = (struct ifaddr *) data;
428 1.1 matt int s, error = 0;
429 1.1 matt
430 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifioctl");
431 1.1 matt s = splnet();
432 1.1 matt
433 1.1 matt switch (cmd) {
434 1.31 dyoung case SIOCINITIFADDR:
435 1.1 matt ifp->if_flags |= IFF_UP;
436 1.31 dyoung error = gfe_whack(sc, GE_WHACK_START);
437 1.1 matt switch (ifa->ifa_addr->sa_family) {
438 1.1 matt #ifdef INET
439 1.1 matt case AF_INET:
440 1.1 matt if (error == 0)
441 1.1 matt arp_ifinit(ifp, ifa);
442 1.1 matt break;
443 1.1 matt #endif
444 1.1 matt default:
445 1.1 matt break;
446 1.1 matt }
447 1.1 matt break;
448 1.1 matt
449 1.1 matt case SIOCSIFFLAGS:
450 1.31 dyoung if ((error = ifioctl_common(ifp, cmd, data)) != 0)
451 1.31 dyoung break;
452 1.31 dyoung /* XXX re-use ether_ioctl() */
453 1.1 matt switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
454 1.1 matt case IFF_UP|IFF_RUNNING:/* active->active, update */
455 1.1 matt error = gfe_whack(sc, GE_WHACK_CHANGE);
456 1.1 matt break;
457 1.1 matt case IFF_RUNNING: /* not up, so we stop */
458 1.1 matt error = gfe_whack(sc, GE_WHACK_STOP);
459 1.1 matt break;
460 1.1 matt case IFF_UP: /* not running, so we start */
461 1.1 matt error = gfe_whack(sc, GE_WHACK_START);
462 1.1 matt break;
463 1.1 matt case 0: /* idle->idle: do nothing */
464 1.1 matt break;
465 1.1 matt }
466 1.1 matt break;
467 1.1 matt
468 1.27 dyoung case SIOCSIFMEDIA:
469 1.27 dyoung case SIOCGIFMEDIA:
470 1.1 matt case SIOCADDMULTI:
471 1.1 matt case SIOCDELMULTI:
472 1.25 dyoung if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
473 1.1 matt if (ifp->if_flags & IFF_RUNNING)
474 1.1 matt error = gfe_whack(sc, GE_WHACK_CHANGE);
475 1.1 matt else
476 1.1 matt error = 0;
477 1.1 matt }
478 1.1 matt break;
479 1.1 matt
480 1.1 matt case SIOCSIFMTU:
481 1.1 matt if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
482 1.1 matt error = EINVAL;
483 1.1 matt break;
484 1.1 matt }
485 1.28 dyoung if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
486 1.28 dyoung error = 0;
487 1.1 matt break;
488 1.1 matt
489 1.1 matt default:
490 1.31 dyoung error = ether_ioctl(ifp, cmd, data);
491 1.1 matt break;
492 1.1 matt }
493 1.1 matt splx(s);
494 1.1 matt GE_FUNC_EXIT(sc, "");
495 1.1 matt return error;
496 1.1 matt }
497 1.1 matt
498 1.1 matt void
499 1.1 matt gfe_ifstart(struct ifnet *ifp)
500 1.1 matt {
501 1.1 matt struct gfe_softc * const sc = ifp->if_softc;
502 1.1 matt struct mbuf *m;
503 1.1 matt
504 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifstart");
505 1.1 matt
506 1.1 matt if ((ifp->if_flags & IFF_RUNNING) == 0) {
507 1.1 matt GE_FUNC_EXIT(sc, "$");
508 1.1 matt return;
509 1.1 matt }
510 1.1 matt
511 1.1 matt for (;;) {
512 1.1 matt IF_DEQUEUE(&ifp->if_snd, m);
513 1.1 matt if (m == NULL) {
514 1.1 matt ifp->if_flags &= ~IFF_OACTIVE;
515 1.1 matt GE_FUNC_EXIT(sc, "");
516 1.1 matt return;
517 1.1 matt }
518 1.1 matt
519 1.1 matt /*
520 1.1 matt 		 * No space in the pending queue?  Try again later.
521 1.1 matt */
522 1.15 matt if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
523 1.1 matt break;
524 1.1 matt
525 1.1 matt /*
526 1.1 matt * Try to enqueue a mbuf to the device. If that fails, we
527 1.1 matt * can always try to map the next mbuf.
528 1.1 matt */
529 1.15 matt IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
530 1.1 matt GE_DPRINTF(sc, (">"));
531 1.1 matt #ifndef GE_NOTX
532 1.1 matt (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
533 1.1 matt #endif
534 1.1 matt }
535 1.1 matt
536 1.1 matt /*
537 1.1 matt 	 * We failed to enqueue the mbuf; put it back and try again later.
538 1.1 matt */
539 1.1 matt IF_PREPEND(&ifp->if_snd, m);
540 1.1 matt ifp->if_flags |= IFF_OACTIVE;
541 1.1 matt GE_FUNC_EXIT(sc, "%%");
542 1.1 matt }
543 1.1 matt
544 1.1 matt void
545 1.1 matt gfe_ifwatchdog(struct ifnet *ifp)
546 1.1 matt {
547 1.1 matt struct gfe_softc * const sc = ifp->if_softc;
548 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];
549 1.1 matt
550 1.1 matt GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
551 1.29 cegger printf("%s: device timeout", device_xname(&sc->sc_dev));
552 1.15 matt if (ifp->if_flags & IFF_RUNNING) {
553 1.6 matt uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]);
554 1.6 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
555 1.6 matt GE_TXDPOSTSYNC(sc, txq, curtxdnum);
556 1.6 matt printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
557 1.6 matt txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
558 1.6 matt txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
559 1.1 matt GE_READ(sc, EICR));
560 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi);
561 1.6 matt GE_TXDPRESYNC(sc, txq, curtxdnum);
562 1.1 matt }
563 1.1 matt printf("\n");
564 1.1 matt ifp->if_oerrors++;
565 1.1 matt (void) gfe_whack(sc, GE_WHACK_RESTART);
566 1.1 matt GE_FUNC_EXIT(sc, "");
567 1.1 matt }
568 1.1 matt
569 1.1 matt int
571 1.1 matt gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
572 1.15 matt {
573 1.1 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
574 1.1 matt int error;
575 1.1 matt
576 1.2 matt GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
577 1.1 matt GE_DPRINTF(sc, ("(%d)", rxprio));
578 1.2 matt
579 1.5 matt error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
580 1.1 matt GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
581 1.1 matt if (error) {
582 1.1 matt GE_FUNC_EXIT(sc, "!!");
583 1.1 matt return error;
584 1.15 matt }
585 1.1 matt
586 1.2 matt error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
587 1.1 matt GE_RXBUF_MEMSIZE, 0);
588 1.1 matt if (error) {
589 1.1 matt GE_FUNC_EXIT(sc, "!!!");
590 1.1 matt return error;
591 1.15 matt }
592 1.15 matt GE_FUNC_EXIT(sc, "");
593 1.15 matt return error;
594 1.1 matt }
595 1.15 matt
596 1.15 matt int
597 1.15 matt gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
598 1.15 matt {
599 1.15 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
600 1.15 matt volatile struct gt_eth_desc *rxd;
601 1.15 matt const bus_dma_segment_t *ds;
602 1.15 matt int idx;
603 1.15 matt bus_addr_t nxtaddr;
604 1.15 matt bus_size_t boff;
605 1.15 matt
606 1.15 matt GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
607 1.15 matt GE_DPRINTF(sc, ("(%d)", rxprio));
608 1.15 matt
609 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) {
610 1.15 matt int error = gfe_rx_rxqalloc(sc, rxprio);
611 1.15 matt if (error) {
612 1.15 matt GE_FUNC_EXIT(sc, "!");
613 1.15 matt return error;
614 1.15 matt }
615 1.15 matt } else {
616 1.15 matt KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
617 1.15 matt KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
618 1.15 matt }
619 1.15 matt
620 1.1 matt memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);
621 1.1 matt
622 1.1 matt rxq->rxq_descs =
623 1.1 matt (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
624 1.1 matt rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
625 1.1 matt rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
626 1.1 matt rxq->rxq_fi = 0;
627 1.1 matt rxq->rxq_active = GE_RXDESC_MAX;
628 1.1 matt for (idx = 0, rxd = rxq->rxq_descs,
629 1.1 matt boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
630 1.1 matt nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
631 1.1 matt idx < GE_RXDESC_MAX;
632 1.2 matt idx++, rxd++, nxtaddr += sizeof(*rxd)) {
633 1.2 matt rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
634 1.2 matt rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
635 1.1 matt rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
636 1.1 matt /*
637 1.1 matt * update the nxtptr to point to the next txd.
638 1.1 matt */
639 1.1 matt if (idx == GE_RXDESC_MAX - 1)
640 1.2 matt nxtaddr = rxq->rxq_desc_busaddr;
641 1.1 matt rxd->ed_nxtptr = htogt32(nxtaddr);
642 1.1 matt boff += GE_RXBUF_SIZE;
643 1.1 matt if (boff == ds->ds_len) {
644 1.1 matt ds++;
645 1.1 matt boff = 0;
646 1.1 matt }
647 1.1 matt }
648 1.1 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
649 1.1 matt rxq->rxq_desc_mem.gdm_map->dm_mapsize,
650 1.1 matt BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
651 1.1 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
652 1.2 matt rxq->rxq_buf_mem.gdm_map->dm_mapsize,
653 1.1 matt BUS_DMASYNC_PREREAD);
654 1.1 matt
655 1.1 matt rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
656 1.1 matt switch (rxprio) {
657 1.1 matt case GE_RXPRIO_HI:
658 1.1 matt rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
659 1.1 matt rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
660 1.1 matt rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
661 1.1 matt break;
662 1.1 matt case GE_RXPRIO_MEDHI:
663 1.1 matt rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
664 1.1 matt rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
665 1.1 matt rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
666 1.1 matt break;
667 1.1 matt case GE_RXPRIO_MEDLO:
668 1.1 matt rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
669 1.1 matt rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
670 1.1 matt rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
671 1.1 matt break;
672 1.1 matt case GE_RXPRIO_LO:
673 1.1 matt rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
674 1.1 matt rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
675 1.1 matt rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
676 1.1 matt break;
677 1.1 matt }
678 1.15 matt GE_FUNC_EXIT(sc, "");
679 1.1 matt return 0;
680 1.1 matt }
681 1.1 matt
682 1.1 matt void
683 1.1 matt gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
684 1.1 matt {
685 1.15 matt struct ifnet * const ifp = &sc->sc_ec.ec_if;
686 1.1 matt struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
687 1.1 matt struct mbuf *m = rxq->rxq_curpkt;
688 1.1 matt
689 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_get");
690 1.1 matt GE_DPRINTF(sc, ("(%d)", rxprio));
691 1.1 matt
692 1.1 matt while (rxq->rxq_active > 0) {
693 1.1 matt volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
694 1.1 matt struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
695 1.1 matt const struct ether_header *eh;
696 1.1 matt unsigned int cmdsts;
697 1.1 matt size_t buflen;
698 1.6 matt
699 1.2 matt GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
700 1.1 matt cmdsts = gt32toh(rxd->ed_cmdsts);
701 1.1 matt GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
702 1.1 matt rxq->rxq_cmdsts = cmdsts;
703 1.1 matt /*
704 1.1 matt * Sometimes the GE "forgets" to reset the ownership bit.
705 1.1 matt * But if the length has been rewritten, the packet is ours
706 1.1 matt * so pretend the O bit is set.
707 1.2 matt */
708 1.1 matt buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
709 1.6 matt if ((cmdsts & RX_CMD_O) && buflen == 0) {
710 1.1 matt GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
711 1.1 matt break;
712 1.1 matt }
713 1.1 matt
714 1.1 matt /*
715 1.1 matt * If this is not a single buffer packet with no errors
716 1.1 matt * or for some reason it's bigger than our frame size,
717 1.1 matt * ignore it and go to the next packet.
718 1.1 matt */
719 1.1 matt if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
720 1.1 matt (RX_CMD_F|RX_CMD_L) ||
721 1.1 matt buflen > sc->sc_max_frame_length) {
722 1.1 matt GE_DPRINTF(sc, ("!"));
723 1.1 matt --rxq->rxq_active;
724 1.1 matt ifp->if_ipackets++;
725 1.1 matt ifp->if_ierrors++;
726 1.1 matt goto give_it_back;
727 1.1 matt }
728 1.14 thorpej
729 1.14 thorpej /* CRC is included with the packet; trim it off. */
730 1.14 thorpej buflen -= ETHER_CRC_LEN;
731 1.1 matt
732 1.1 matt if (m == NULL) {
733 1.1 matt MGETHDR(m, M_DONTWAIT, MT_DATA);
734 1.1 matt if (m == NULL) {
735 1.1 matt GE_DPRINTF(sc, ("?"));
736 1.1 matt break;
737 1.1 matt }
738 1.1 matt }
739 1.1 matt if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
740 1.1 matt MCLGET(m, M_DONTWAIT);
741 1.1 matt if ((m->m_flags & M_EXT) == 0) {
742 1.1 matt GE_DPRINTF(sc, ("?"));
743 1.1 matt break;
744 1.1 matt }
745 1.5 matt }
746 1.1 matt m->m_data += 2;
747 1.1 matt m->m_len = 0;
748 1.5 matt m->m_pkthdr.len = 0;
749 1.1 matt m->m_pkthdr.rcvif = ifp;
750 1.1 matt rxq->rxq_cmdsts = cmdsts;
751 1.1 matt --rxq->rxq_active;
752 1.1 matt
753 1.2 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
754 1.1 matt rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);
755 1.1 matt
756 1.30 he KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
757 1.1 matt memcpy(m->m_data + m->m_len, rxb->rxb_data, buflen);
758 1.1 matt m->m_len = buflen;
759 1.1 matt m->m_pkthdr.len = buflen;
760 1.1 matt
761 1.35 joerg ifp->if_ipackets++;
762 1.1 matt bpf_mtap(ifp, m);
763 1.1 matt
764 1.1 matt eh = (const struct ether_header *) m->m_data;
765 1.1 matt if ((ifp->if_flags & IFF_PROMISC) ||
766 1.1 matt (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
767 1.1 matt (rxq->rxq_cmdsts & RX_STS_HE) ||
768 1.24 dyoung (eh->ether_dhost[0] & 1) != 0 ||
769 1.1 matt memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl),
770 1.1 matt ETHER_ADDR_LEN) == 0) {
771 1.1 matt (*ifp->if_input)(ifp, m);
772 1.1 matt m = NULL;
773 1.1 matt GE_DPRINTF(sc, (">"));
774 1.1 matt } else {
775 1.1 matt m->m_len = 0;
776 1.1 matt m->m_pkthdr.len = 0;
777 1.1 matt GE_DPRINTF(sc, ("+"));
778 1.1 matt }
779 1.1 matt rxq->rxq_cmdsts = 0;
780 1.1 matt
781 1.1 matt give_it_back:
782 1.2 matt rxd->ed_lencnt &= ~0xffff; /* zero out length */
783 1.2 matt rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
784 1.2 matt #if 0
785 1.2 matt GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
786 1.2 matt rxq->rxq_fi,
787 1.2 matt ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
788 1.2 matt ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
789 1.6 matt #endif
790 1.1 matt GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
791 1.1 matt if (++rxq->rxq_fi == GE_RXDESC_MAX)
792 1.1 matt rxq->rxq_fi = 0;
793 1.1 matt rxq->rxq_active++;
794 1.1 matt }
795 1.1 matt rxq->rxq_curpkt = m;
796 1.1 matt GE_FUNC_EXIT(sc, "");
797 1.1 matt }
798 1.1 matt
799 1.1 matt uint32_t
800 1.1 matt gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
801 1.5 matt {
802 1.1 matt struct ifnet * const ifp = &sc->sc_ec.ec_if;
803 1.1 matt struct gfe_rxqueue *rxq;
804 1.1 matt uint32_t rxbits;
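	/*
	 * RXPRIO_DECODER is a packed table of 2-bit entries: shifting it
	 * right by twice the pending-queue bitmask yields the highest-
	 * numbered set bit, i.e. the highest-priority rx queue to service.
	 */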
805 1.1 matt #define RXPRIO_DECODER 0xffffaa50
806 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_process");
807 1.1 matt
808 1.1 matt rxbits = ETH_IR_RxBuffer_GET(cause);
809 1.1 matt while (rxbits) {
810 1.1 matt enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
811 1.1 matt GE_DPRINTF(sc, ("%1x", rxbits));
812 1.1 matt rxbits &= ~(1 << rxprio);
813 1.1 matt gfe_rx_get(sc, rxprio);
814 1.1 matt }
815 1.1 matt
816 1.1 matt rxbits = ETH_IR_RxError_GET(cause);
817 1.1 matt while (rxbits) {
818 1.1 matt enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
819 1.1 matt uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
820 1.1 matt int idx;
821 1.15 matt rxbits &= ~(1 << rxprio);
822 1.1 matt rxq = &sc->sc_rxq[rxprio];
823 1.1 matt sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
824 1.1 matt intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
825 1.1 matt if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
826 1.1 matt sc->sc_tickflags |= GE_TICK_RX_RESTART;
827 1.1 matt callout_reset(&sc->sc_co, 1, gfe_tick, sc);
828 1.5 matt }
829 1.1 matt ifp->if_ierrors++;
830 1.29 cegger GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
831 1.1 matt device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi));
832 1.2 matt memset(masks, 0, sizeof(masks));
833 1.2 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
834 1.2 matt 0, rxq->rxq_desc_mem.gdm_size,
835 1.1 matt BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
836 1.1 matt for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
837 1.1 matt volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];
838 1.2 matt
839 1.1 matt if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
840 1.1 matt masks[idx/32] |= 1 << (idx & 31);
841 1.2 matt }
842 1.2 matt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
843 1.2 matt 0, rxq->rxq_desc_mem.gdm_size,
844 1.1 matt BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
845 1.1 matt #if defined(DEBUG)
846 1.29 cegger printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
847 1.1 matt device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi,
848 1.1 matt rxq->rxq_cmdsts, masks[0], masks[1]);
849 1.1 matt #endif
850 1.1 matt }
851 1.1 matt if ((intrmask & ETH_IR_RxBits) == 0)
852 1.1 matt intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);
853 1.1 matt
854 1.1 matt GE_FUNC_EXIT(sc, "");
855 1.1 matt return intrmask;
856 1.1 matt }
857 1.1 matt
858 1.1 matt int
859 1.1 matt gfe_rx_prime(struct gfe_softc *sc)
860 1.1 matt {
861 1.1 matt struct gfe_rxqueue *rxq;
862 1.1 matt int error;
863 1.1 matt
864 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_prime");
865 1.15 matt
866 1.1 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
867 1.1 matt if (error)
868 1.15 matt goto bail;
869 1.1 matt rxq = &sc->sc_rxq[GE_RXPRIO_HI];
870 1.1 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) {
871 1.1 matt GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
872 1.1 matt GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
873 1.1 matt }
874 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits;
875 1.15 matt
876 1.1 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
877 1.1 matt if (error)
878 1.1 matt goto bail;
879 1.15 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) {
880 1.1 matt rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
881 1.1 matt GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
882 1.1 matt GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
883 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits;
884 1.1 matt }
885 1.15 matt
886 1.1 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
887 1.1 matt if (error)
888 1.1 matt goto bail;
889 1.15 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) {
890 1.1 matt rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
891 1.1 matt GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
892 1.1 matt GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
893 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits;
894 1.1 matt }
895 1.15 matt
896 1.1 matt error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
897 1.1 matt if (error)
898 1.1 matt goto bail;
899 1.15 matt if ((sc->sc_flags & GE_RXACTIVE) == 0) {
900 1.1 matt rxq = &sc->sc_rxq[GE_RXPRIO_LO];
901 1.1 matt GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
902 1.1 matt GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
903 1.1 matt sc->sc_intrmask |= rxq->rxq_intrbits;
904 1.1 matt }
905 1.1 matt
906 1.1 matt bail:
907 1.1 matt GE_FUNC_EXIT(sc, "");
908 1.1 matt return error;
909 1.1 matt }
910 1.1 matt
911 1.1 matt void
912 1.1 matt gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
913 1.15 matt {
914 1.1 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
915 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
916 1.1 matt if (rxq == NULL) {
917 1.1 matt GE_FUNC_EXIT(sc, "");
918 1.1 matt return;
919 1.1 matt }
920 1.1 matt
921 1.1 matt if (rxq->rxq_curpkt)
922 1.15 matt m_freem(rxq->rxq_curpkt);
923 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) {
924 1.15 matt gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
925 1.15 matt gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
926 1.1 matt }
927 1.1 matt GE_FUNC_EXIT(sc, "");
928 1.1 matt }
929 1.1 matt
930 1.1 matt void
931 1.1 matt gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
932 1.1 matt {
933 1.1 matt GE_FUNC_ENTER(sc, "gfe_rx_stop");
934 1.1 matt sc->sc_flags &= ~GE_RXACTIVE;
935 1.1 matt sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
936 1.1 matt sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
937 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
938 1.1 matt GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
939 1.1 matt do {
940 1.1 matt delay(10);
941 1.1 matt } while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
942 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_HI);
943 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
944 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
945 1.1 matt gfe_rx_cleanup(sc, GE_RXPRIO_LO);
946 1.1 matt GE_FUNC_EXIT(sc, "");
947 1.1 matt }
948 1.1 matt
949 1.1 matt void
951 1.1 matt gfe_tick(void *arg)
952 1.1 matt {
953 1.1 matt struct gfe_softc * const sc = arg;
954 1.1 matt uint32_t intrmask;
955 1.1 matt unsigned int tickflags;
956 1.1 matt int s;
957 1.1 matt
958 1.1 matt GE_FUNC_ENTER(sc, "gfe_tick");
959 1.1 matt
960 1.1 matt s = splnet();
961 1.1 matt
962 1.1 matt tickflags = sc->sc_tickflags;
963 1.1 matt sc->sc_tickflags = 0;
964 1.1 matt intrmask = sc->sc_intrmask;
965 1.1 matt if (tickflags & GE_TICK_TX_IFSTART)
966 1.1 matt gfe_ifstart(&sc->sc_ec.ec_if);
967 1.1 matt if (tickflags & GE_TICK_RX_RESTART) {
968 1.15 matt intrmask |= sc->sc_idlemask;
969 1.1 matt if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
970 1.1 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
971 1.1 matt rxq->rxq_fi = 0;
972 1.1 matt GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
973 1.1 matt GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
974 1.15 matt }
975 1.1 matt if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
976 1.1 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
977 1.1 matt rxq->rxq_fi = 0;
978 1.1 matt GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
979 1.1 matt GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
980 1.15 matt }
981 1.1 matt if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
982 1.1 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
983 1.1 matt rxq->rxq_fi = 0;
984 1.1 matt GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
985 1.1 matt GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
986 1.15 matt }
987 1.1 matt if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
988 1.1 matt struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
989 1.1 matt rxq->rxq_fi = 0;
990 1.1 matt GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
991 1.1 matt GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
992 1.1 matt }
993 1.1 matt sc->sc_idlemask = 0;
994 1.1 matt }
995 1.1 matt if (intrmask != sc->sc_intrmask) {
996 1.1 matt sc->sc_intrmask = intrmask;
997 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
998 1.1 matt }
999 1.1 matt gfe_intr(sc);
1000 1.1 matt splx(s);
1001 1.1 matt
1002 1.1 matt GE_FUNC_EXIT(sc, "");
1003 1.1 matt }
1004 1.1 matt
1005 1.1 matt int
1006 1.5 matt gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
1007 1.5 matt {
1008 1.15 matt const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
1009 1.1 matt struct ifnet * const ifp = &sc->sc_ec.ec_if;
1010 1.1 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1011 1.9 matt volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
1012 1.1 matt uint32_t intrmask = sc->sc_intrmask;
1013 1.1 matt size_t buflen;
1014 1.1 matt struct mbuf *m;
1015 1.1 matt
1016 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_enqueue");
1017 1.13 scw
1018 1.13 scw /*
1019 1.1 matt 	 * Anything in the pending queue to enqueue?  If not, punt.  Likewise
1020 1.1 matt 	 * punt if the txq has not yet been created.
1021 1.13 scw 	 * Otherwise grab the first pending mbuf.
1022 1.1 matt */
1023 1.1 matt if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
1024 1.1 matt GE_FUNC_EXIT(sc, "-");
1025 1.1 matt return 0;
1026 1.1 matt }
1027 1.1 matt
1028 1.1 matt /*
1029 1.1 matt * Have we [over]consumed our limit of descriptors?
1030 1.6 matt * Do we have enough free descriptors?
1031 1.1 matt */
1032 1.1 matt if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
1033 1.1 matt volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi];
1034 1.6 matt uint32_t cmdsts;
1035 1.2 matt size_t pktlen;
1036 1.1 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
1037 1.6 matt cmdsts = gt32toh(txd2->ed_cmdsts);
1038 1.6 matt if (cmdsts & TX_CMD_O) {
1039 1.6 matt int nextin;
1040 1.6 matt /*
1041 1.6 matt 			 * Sometimes the Discovery forgets to update the
1042 1.6 matt 			 * last descriptor.  See if we own the descriptor
1043 1.6 matt 			 * after it (since we know we've turned that over to
1044 1.6 matt 			 * the Discovery and if we own it now, the Discovery
1045 1.6 matt * gave it back). If we do, we know the Discovery
1046 1.6 matt * gave back this one but forgot to mark it as ours.
1047 1.6 matt */
1048 1.6 matt nextin = txq->txq_fi + 1;
1049 1.6 matt if (nextin == GE_TXDESC_MAX)
1050 1.6 matt nextin = 0;
1051 1.6 matt GE_TXDPOSTSYNC(sc, txq, nextin);
1052 1.6 matt if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
1053 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1054 1.6 matt GE_TXDPRESYNC(sc, txq, nextin);
1055 1.6 matt GE_FUNC_EXIT(sc, "@");
1056 1.6 matt return 0;
1057 1.6 matt }
1058 1.29 cegger #ifdef DEBUG
1059 1.6 matt printf("%s: txenqueue: transmitter resynced at %d\n",
1060 1.1 matt device_xname(&sc->sc_dev), txq->txq_fi);
1061 1.1 matt #endif
1062 1.1 matt }
1063 1.2 matt if (++txq->txq_fi == GE_TXDESC_MAX)
1064 1.2 matt txq->txq_fi = 0;
1065 1.5 matt txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
1066 1.1 matt pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
1067 1.1 matt txq->txq_inptr += roundup(pktlen, dcache_line_size);
1068 1.1 matt txq->txq_nactive--;
1069 1.5 matt
1070 1.1 matt /* statistics */
1071 1.5 matt ifp->if_opackets++;
1072 1.1 matt if (cmdsts & TX_STS_ES)
1073 1.1 matt ifp->if_oerrors++;
1074 1.1 matt GE_DPRINTF(sc, ("%%"));
1075 1.9 matt }
1076 1.9 matt
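	/*
	 * Round the copy length up to a whole data-cache line; txq_outptr
	 * only ever advances by these rounded lengths, so each packet
	 * occupies its own cache lines in the transmit bounce buffer.
	 */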
1077 1.1 matt buflen = roundup(m->m_pkthdr.len, dcache_line_size);
1078 1.1 matt
1079 1.1 matt /*
1080 1.1 matt * If this packet would wrap around the end of the buffer, reset back
1081 1.9 matt * to the beginning.
1082 1.1 matt */
1083 1.1 matt if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
1084 1.1 matt txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
1085 1.1 matt txq->txq_outptr = 0;
1086 1.1 matt }
1087 1.1 matt
1088 1.1 matt /*
1089 1.1 matt * Make sure the output packet doesn't run over the beginning of
1090 1.5 matt * what we've already given the GT.
1091 1.9 matt */
1092 1.1 matt if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
1093 1.1 matt txq->txq_outptr + buflen > txq->txq_inptr) {
1094 1.1 matt intrmask |= txq->txq_intrbits &
1095 1.1 matt (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
1096 1.1 matt if (sc->sc_intrmask != intrmask) {
1097 1.1 matt sc->sc_intrmask = intrmask;
1098 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
1099 1.1 matt }
1100 1.1 matt GE_FUNC_EXIT(sc, "#");
1101 1.1 matt return 0;
1102 1.16 perry }
1103 1.1 matt
1104 1.1 matt /*
1105 1.1 matt * The end-of-list descriptor we put on last time is the starting point
1106 1.1 matt * for this packet. The GT is supposed to terminate list processing on
1107 1.1 matt * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
1108 1.1 matt * must terminate the list.
1109 1.1 matt */
1110 1.1 matt intrmask = sc->sc_intrmask;
1111 1.22 he
1112 1.1 matt m_copydata(m, 0, m->m_pkthdr.len,
1113 1.9 matt (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr);
1114 1.2 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
1115 1.2 matt txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
1116 1.6 matt txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
1117 1.2 matt txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
1118 1.1 matt GE_TXDPRESYNC(sc, txq, txq->txq_lo);
1119 1.1 matt
1120 1.1 matt /*
1121 1.1 matt * Request a buffer interrupt every 2/3 of the way thru the transmit
1122 1.9 matt * buffer.
1123 1.1 matt */
1124 1.2 matt txq->txq_ei_gapcount += buflen;
1125 1.1 matt if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
1126 1.1 matt txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
1127 1.2 matt txq->txq_ei_gapcount = 0;
1128 1.1 matt } else {
1129 1.2 matt txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
1130 1.2 matt }
1131 1.2 matt #if 0
1132 1.2 matt GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
1133 1.2 matt ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
1134 1.6 matt ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
1135 1.1 matt #endif
1136 1.9 matt GE_TXDPRESYNC(sc, txq, txq->txq_lo);
1137 1.1 matt
1138 1.1 matt txq->txq_outptr += buflen;
1139 1.1 matt /*
1140 1.1 matt * Tell the SDMA engine to "Fetch!"
1141 1.1 matt */
1142 1.1 matt GE_WRITE(sc, ESDCMR,
1143 1.1 matt txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));
1144 1.1 matt
1145 1.1 matt GE_DPRINTF(sc, ("(%d)", txq->txq_lo));
1146 1.1 matt
1147 1.1 matt /*
1148 1.5 matt 	 * Account for the newly active descriptor and advance the last-out index.
1149 1.1 matt */
1150 1.1 matt txq->txq_nactive++;
1151 1.1 matt if (++txq->txq_lo == GE_TXDESC_MAX)
1152 1.1 matt txq->txq_lo = 0;
1153 1.1 matt
1154 1.1 matt /*
1155 1.1 matt * Move mbuf from the pending queue to the snd queue.
1156 1.35 joerg */
1157 1.1 matt IF_DEQUEUE(&txq->txq_pendq, m);
1158 1.5 matt bpf_mtap(ifp, m);
1159 1.1 matt m_freem(m);
1160 1.1 matt ifp->if_flags &= ~IFF_OACTIVE;
1161 1.1 matt
1162 1.1 matt /*
1163 1.1 matt * Since we have put an item into the packet queue, we now want
1164 1.1 matt * an interrupt when the transmit queue finishes processing the
1165 1.1 matt 	 * list.  But only update the mask if it needs changing.
1166 1.1 matt */
1167 1.1 matt intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
1168 1.1 matt if (sc->sc_intrmask != intrmask) {
1169 1.1 matt sc->sc_intrmask = intrmask;
1170 1.5 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
1171 1.5 matt }
1172 1.1 matt if (ifp->if_timer == 0)
1173 1.1 matt ifp->if_timer = 5;
1174 1.1 matt GE_FUNC_EXIT(sc, "*");
1175 1.1 matt return 1;
1176 1.1 matt }
1177 1.1 matt
1178 1.1 matt uint32_t
1179 1.15 matt gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
1180 1.5 matt {
1181 1.1 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1182 1.1 matt struct ifnet * const ifp = &sc->sc_ec.ec_if;
1183 1.1 matt
1184 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_done");
1185 1.1 matt
1186 1.1 matt if (txq == NULL) {
1187 1.1 matt GE_FUNC_EXIT(sc, "");
1188 1.1 matt return intrmask;
1189 1.1 matt }
1190 1.5 matt
1191 1.2 matt while (txq->txq_nactive > 0) {
1192 1.1 matt const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
1193 1.1 matt volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
1194 1.1 matt uint32_t cmdsts;
1195 1.6 matt size_t pktlen;
1196 1.2 matt
1197 1.6 matt GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
1198 1.6 matt if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
1199 1.6 matt int nextin;
1200 1.6 matt
1201 1.6 matt if (txq->txq_nactive == 1) {
1202 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1203 1.6 matt GE_FUNC_EXIT(sc, "");
1204 1.1 matt return intrmask;
1205 1.6 matt }
1206 1.6 matt /*
1207 1.6 matt * Sometimes the Discovery forgets to update the
1208 1.6 matt * ownership bit in the descriptor. See if we own the
1209 1.6 matt * descriptor after it (since we know we've turned
1210 1.6 matt 			 * that over to the Discovery and if we own it now then the
1211 1.6 matt * Discovery gave it back). If we do, we know the
1212 1.1 matt * Discovery gave back this one but forgot to mark it
1213 1.6 matt * as ours.
1214 1.6 matt */
1215 1.6 matt nextin = txq->txq_fi + 1;
1216 1.6 matt if (nextin == GE_TXDESC_MAX)
1217 1.6 matt nextin = 0;
1218 1.6 matt GE_TXDPOSTSYNC(sc, txq, nextin);
1219 1.6 matt if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
1220 1.6 matt GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1221 1.6 matt GE_TXDPRESYNC(sc, txq, nextin);
1222 1.1 matt GE_FUNC_EXIT(sc, "");
1223 1.6 matt return intrmask;
1224 1.6 matt }
1225 1.29 cegger #ifdef DEBUG
1226 1.1 matt printf("%s: txdone: transmitter resynced at %d\n",
1227 1.1 matt device_xname(&sc->sc_dev), txq->txq_fi);
1228 1.2 matt #endif
1229 1.2 matt }
1230 1.2 matt #if 0
1231 1.2 matt GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
1232 1.2 matt txq->txq_lo,
1233 1.2 matt ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
1234 1.1 matt ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
1235 1.1 matt #endif
1236 1.1 matt GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
1237 1.2 matt if (++txq->txq_fi == GE_TXDESC_MAX)
1238 1.2 matt txq->txq_fi = 0;
1239 1.2 matt txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
1240 1.2 matt pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
1241 1.10 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
1242 1.1 matt txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
1243 1.1 matt txq->txq_inptr += roundup(pktlen, dcache_line_size);
1244 1.5 matt
1245 1.1 matt /* statistics */
1246 1.5 matt ifp->if_opackets++;
1247 1.1 matt if (cmdsts & TX_STS_ES)
1248 1.6 matt ifp->if_oerrors++;
1249 1.1 matt
1250 1.5 matt /* txd->ed_bufptr = 0; */
1251 1.1 matt
1252 1.1 matt ifp->if_timer = 5;
1253 1.1 matt --txq->txq_nactive;
1254 1.1 matt }
1255 1.29 cegger if (txq->txq_nactive != 0)
1256 1.5 matt panic("%s: transmit fifo%d empty but active count (%d) > 0!",
1257 1.1 matt device_xname(&sc->sc_dev), txprio, txq->txq_nactive);
1258 1.1 matt ifp->if_timer = 0;
1259 1.1 matt intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
1260 1.1 matt intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
1261 1.1 matt GE_FUNC_EXIT(sc, "");
1262 1.1 matt return intrmask;
1263 1.1 matt }
1264 1.15 matt
1265 1.15 matt int
1266 1.15 matt gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)
1267 1.15 matt {
1268 1.15 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1269 1.15 matt int error;
1270 1.15 matt
1271 1.15 matt GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");
1272 1.15 matt
1273 1.15 matt error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
1274 1.15 matt GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
1275 1.15 matt if (error) {
1276 1.15 matt GE_FUNC_EXIT(sc, "");
1277 1.15 matt return error;
1278 1.15 matt }
1279 1.15 matt error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
1280 1.15 matt if (error) {
1281 1.15 matt gfe_dmamem_free(sc, &txq->txq_desc_mem);
1282 1.15 matt GE_FUNC_EXIT(sc, "");
1283 1.15 matt return error;
1284 1.15 matt }
1285 1.15 matt GE_FUNC_EXIT(sc, "");
1286 1.15 matt return 0;
1287 1.15 matt }
1288 1.1 matt
1289 1.1 matt int
1290 1.15 matt gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
1291 1.1 matt {
1292 1.1 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1293 1.1 matt volatile struct gt_eth_desc *txd;
1294 1.1 matt unsigned int i;
1295 1.1 matt bus_addr_t addr;
1296 1.1 matt
1297 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_start");
1298 1.1 matt
1299 1.1 matt sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
1300 1.15 matt ETH_IR_TxEndLow |ETH_IR_TxBufferLow);
1301 1.15 matt
1302 1.15 matt if (sc->sc_flags & GE_NOFREE) {
1303 1.15 matt KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
1304 1.15 matt KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
1305 1.1 matt } else {
1306 1.15 matt int error = gfe_tx_txqalloc(sc, txprio);
1307 1.1 matt if (error) {
1308 1.1 matt GE_FUNC_EXIT(sc, "!");
1309 1.1 matt return error;
1310 1.1 matt }
1311 1.1 matt }
1312 1.1 matt
1313 1.1 matt txq->txq_descs =
1314 1.1 matt (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
1315 1.1 matt txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
1316 1.1 matt txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;
1317 1.1 matt
1318 1.1 matt txq->txq_pendq.ifq_maxlen = 10;
1319 1.1 matt txq->txq_ei_gapcount = 0;
1320 1.1 matt txq->txq_nactive = 0;
1321 1.1 matt txq->txq_fi = 0;
1322 1.1 matt txq->txq_lo = 0;
1323 1.1 matt txq->txq_inptr = GE_TXBUF_SIZE;
1324 1.1 matt txq->txq_outptr = 0;
1325 1.1 matt for (i = 0, txd = txq->txq_descs,
1326 1.1 matt addr = txq->txq_desc_busaddr + sizeof(*txd);
1327 1.1 matt i < GE_TXDESC_MAX - 1;
1328 1.1 matt i++, txd++, addr += sizeof(*txd)) {
1329 1.1 matt /*
1330 1.1 matt * update the nxtptr to point to the next txd.
1331 1.2 matt */
1332 1.1 matt txd->ed_cmdsts = 0;
1333 1.1 matt txd->ed_nxtptr = htogt32(addr);
1334 1.2 matt }
1335 1.1 matt txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
1336 1.15 matt htogt32(txq->txq_desc_busaddr);
1337 1.1 matt bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
1338 1.1 matt GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1339 1.1 matt
1340 1.1 matt switch (txprio) {
1341 1.1 matt case GE_TXPRIO_HI:
1342 1.1 matt txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
1343 1.1 matt txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
1344 1.1 matt txq->txq_epsrbits = ETH_EPSR_TxHigh;
1345 1.1 matt txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
1346 1.1 matt GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);
1347 1.1 matt break;
1348 1.1 matt
1349 1.1 matt case GE_TXPRIO_LO:
1350 1.1 matt txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
1351 1.1 matt txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
1352 1.1 matt txq->txq_epsrbits = ETH_EPSR_TxLow;
1353 1.1 matt txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
1354 1.1 matt GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);
1355 1.1 matt break;
1356 1.1 matt
1357 1.1 matt case GE_TXPRIO_NONE:
1358 1.1 matt break;
1359 1.1 matt }
1360 1.18 thorpej #if 0
1361 1.18 thorpej GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
1362 1.1 matt gt_write(device_parent(&sc->sc_dev), txq->txq_ectdp,
1363 1.1 matt txq->txq_desc_busaddr);
1364 1.1 matt GE_DPRINTF(sc, (")"));
1365 1.1 matt #endif
1366 1.1 matt
1367 1.1 matt /*
1368 1.1 matt * If we are restarting, there may be packets in the pending queue
1369 1.1 matt * waiting to be enqueued. Try enqueuing packets from both priority
1370 1.1 matt 	 * queues until the pending queue is empty or there is no room for them
1371 1.1 matt * on the device.
1372 1.1 matt */
1373 1.1 matt while (gfe_tx_enqueue(sc, txprio))
1374 1.1 matt continue;
1375 1.1 matt
1376 1.1 matt GE_FUNC_EXIT(sc, "");
1377 1.1 matt return 0;
1378 1.1 matt }
1379 1.1 matt
1380 1.1 matt void
1381 1.15 matt gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
1382 1.1 matt {
1383 1.1 matt struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1384 1.1 matt
1385 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
1386 1.1 matt if (txq == NULL) {
1387 1.1 matt GE_FUNC_EXIT(sc, "");
1388 1.1 matt return;
1389 1.1 matt }
1390 1.1 matt
1391 1.1 matt if (!flush) {
1392 1.1 matt GE_FUNC_EXIT(sc, "");
1393 1.1 matt return;
1394 1.15 matt }
1395 1.15 matt
1396 1.15 matt if ((sc->sc_flags & GE_NOFREE) == 0) {
1397 1.15 matt gfe_dmamem_free(sc, &txq->txq_desc_mem);
1398 1.1 matt gfe_dmamem_free(sc, &txq->txq_buf_mem);
1399 1.1 matt }
1400 1.1 matt GE_FUNC_EXIT(sc, "-F");
1401 1.1 matt }
1402 1.1 matt
1403 1.1 matt void
1404 1.1 matt gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
1405 1.1 matt {
1406 1.1 matt GE_FUNC_ENTER(sc, "gfe_tx_stop");
1407 1.1 matt
1408 1.1 matt GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);
1409 1.1 matt
1410 1.1 matt sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
1411 1.1 matt sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
1412 1.1 matt sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
1413 1.1 matt ETH_IR_TxEndLow |ETH_IR_TxBufferLow);
1414 1.1 matt
1415 1.1 matt gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
1416 1.1 matt gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);
1417 1.1 matt
1418 1.1 matt sc->sc_ec.ec_if.if_timer = 0;
1419 1.1 matt GE_FUNC_EXIT(sc, "");
1420 1.1 matt }
1421 1.1 matt
1422 1.1 matt int
1424 1.1 matt gfe_intr(void *arg)
1425 1.1 matt {
1426 1.1 matt struct gfe_softc * const sc = arg;
1427 1.1 matt uint32_t cause;
1428 1.1 matt uint32_t intrmask = sc->sc_intrmask;
1429 1.1 matt int claim = 0;
1430 1.1 matt int cnt;
1431 1.1 matt
1432 1.1 matt GE_FUNC_ENTER(sc, "gfe_intr");
1433 1.1 matt
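	/*
	 * Re-read the cause register a few times so that interrupts raised
	 * while earlier ones are being serviced are handled in this same
	 * invocation; stop as soon as no enabled cause bits remain.
	 */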
1434 1.1 matt for (cnt = 0; cnt < 4; cnt++) {
1435 1.1 matt if (sc->sc_intrmask != intrmask) {
1436 1.1 matt sc->sc_intrmask = intrmask;
1437 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
1438 1.1 matt }
1439 1.1 matt cause = GE_READ(sc, EICR);
1440 1.1 matt cause &= sc->sc_intrmask;
1441 1.1 matt GE_DPRINTF(sc, (".%#x", cause));
1442 1.1 matt if (cause == 0)
1443 1.1 matt break;
1444 1.1 matt
1445 1.1 matt claim = 1;
1446 1.1 matt
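		/*
		 * Acknowledge the causes we are about to service by writing
		 * them back as zero bits.
		 */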
1447 1.1 matt GE_WRITE(sc, EICR, ~cause);
1448 1.1 matt #ifndef GE_NORX
1449 1.1 matt if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
1450 1.1 matt intrmask = gfe_rx_process(sc, cause, intrmask);
1451 1.1 matt #endif
1452 1.1 matt
1453 1.1 matt #ifndef GE_NOTX
1454 1.1 matt if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
1455 1.1 matt intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
1456 1.1 matt if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
1457 1.1 matt intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
1458 1.1 matt #endif
1459 1.1 matt if (cause & ETH_IR_MIIPhySTC) {
1460 1.1 matt sc->sc_flags |= GE_PHYSTSCHG;
1461 1.13 scw /* intrmask &= ~ETH_IR_MIIPhySTC; */
1462 1.13 scw }
1463 1.13 scw }
1464 1.13 scw
1465 1.13 scw while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
1466 1.1 matt continue;
1467 1.1 matt while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
1468 1.1 matt continue;
1469 1.1 matt
1470 1.1 matt GE_FUNC_EXIT(sc, "");
1471 1.1 matt return claim;
1472 1.33 cegger }
1473 1.1 matt
1474 1.18 thorpej int
1476 1.1 matt gfe_mii_read (device_t self, int phy, int reg)
1477 1.1 matt {
1478 1.33 cegger return gt_mii_read(self, device_parent(self), phy, reg);
1479 1.1 matt }
1480 1.18 thorpej
1481 1.1 matt void
1482 1.1 matt gfe_mii_write (device_t self, int phy, int reg, int value)
1483 1.1 matt {
1484 1.33 cegger gt_mii_write(self, device_parent(self), phy, reg, value);
1485 1.1 matt }
1486 1.20 thorpej
1487 1.1 matt void
1488 1.1 matt gfe_mii_statchg (device_t self)
1489 1.1 matt {
1490 1.1 matt /* struct gfe_softc *sc = device_private(self); */
1491 1.1 matt /* do nothing? */
1492 1.1 matt }
1493 1.1 matt
1494 1.1 matt int
1496 1.1 matt gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
1497 1.1 matt {
1498 1.1 matt int error = 0;
1499 1.1 matt GE_FUNC_ENTER(sc, "gfe_whack");
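	/*
	 * GE_WHACK_RESTART stops the transmitter and then falls through to
	 * the START path; START programs the port and falls through to
	 * CHANGE, which reloads EPCR/EIMR and kicks the transmit queue.
	 * GE_WHACK_STOP, and any error on the start path, falls out of the
	 * switch to the shutdown code below.
	 */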
1500 1.1 matt
1501 1.1 matt switch (op) {
1502 1.1 matt case GE_WHACK_RESTART:
1503 1.1 matt #ifndef GE_NOTX
1504 1.1 matt gfe_tx_stop(sc, op);
1505 1.1 matt #endif
1506 1.1 matt /* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
1507 1.1 matt /* FALLTHROUGH */
1508 1.1 matt case GE_WHACK_START:
1509 1.1 matt #ifndef GE_NOHASH
1510 1.1 matt if (error == 0 && sc->sc_hashtable == NULL) {
1511 1.1 matt error = gfe_hash_alloc(sc);
1512 1.1 matt if (error)
1513 1.1 matt break;
1514 1.1 matt }
1515 1.1 matt if (op != GE_WHACK_RESTART)
1516 1.1 matt gfe_hash_fill(sc);
1517 1.1 matt #endif
1518 1.1 matt #ifndef GE_NORX
1519 1.1 matt if (op != GE_WHACK_RESTART) {
1520 1.1 matt error = gfe_rx_prime(sc);
1521 1.1 matt if (error)
1522 1.1 matt break;
1523 1.1 matt }
1524 1.1 matt #endif
1525 1.1 matt #ifndef GE_NOTX
1526 1.1 matt error = gfe_tx_start(sc, GE_TXPRIO_HI);
1527 1.1 matt if (error)
1528 1.1 matt break;
1529 1.1 matt #endif
1530 1.1 matt sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
1531 1.1 matt GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
1532 1.1 matt GE_WRITE(sc, EPCXR, sc->sc_pcxr);
1533 1.1 matt GE_WRITE(sc, EICR, 0);
1534 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
1535 1.1 matt #ifndef GE_NOHASH
1536 1.1 matt GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
1537 1.1 matt #endif
1538 1.1 matt #ifndef GE_NORX
1539 1.1 matt GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
1540 1.1 matt sc->sc_flags |= GE_RXACTIVE;
1541 1.1 matt #endif
1542 1.1 matt /* FALLTHROUGH */
1543 1.1 matt case GE_WHACK_CHANGE:
1544 1.2 matt GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
1545 1.2 matt GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
1546 1.2 matt GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
1547 1.1 matt GE_WRITE(sc, EIMR, sc->sc_intrmask);
1548 1.1 matt gfe_ifstart(&sc->sc_ec.ec_if);
1549 1.1 matt GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
1550 1.1 matt GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
1551 1.1 matt GE_FUNC_EXIT(sc, "");
1552 1.1 matt return error;
1553 1.1 matt case GE_WHACK_STOP:
1554 1.1 matt break;
1555 1.1 matt }
1556 1.1 matt
1557 1.1 matt #ifdef GE_DEBUG
1558 1.1 matt if (error)
1559 1.1 matt GE_DPRINTF(sc, (" failed: %d\n", error));
1560 1.1 matt #endif
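	/*
	 * Stop/failure path: disable the port, mask all interrupts and tear
	 * down the transmit, receive and hash table state.
	 */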
1561 1.1 matt GE_WRITE(sc, EPCR, sc->sc_pcr);
1562 1.1 matt GE_WRITE(sc, EIMR, 0);
1563 1.1 matt sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
1564 1.1 matt #ifndef GE_NOTX
1565 1.1 matt gfe_tx_stop(sc, GE_WHACK_STOP);
1566 1.15 matt #endif
1567 1.15 matt #ifndef GE_NORX
1568 1.15 matt gfe_rx_stop(sc, GE_WHACK_STOP);
1569 1.15 matt #endif
1570 1.1 matt #ifndef GE_NOHASH
1571 1.1 matt if ((sc->sc_flags & GE_NOFREE) == 0) {
1572 1.1 matt gfe_dmamem_free(sc, &sc->sc_hash_mem);
1573 1.1 matt sc->sc_hashtable = NULL;
1574 1.1 matt }
1575 1.1 matt #endif
1576 1.1 matt
1577 1.1 matt GE_FUNC_EXIT(sc, "");
1578 1.1 matt return error;
1579 1.1 matt }
1580 1.1 matt
1581 1.1 matt int
1583 1.1 matt gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
1584 1.1 matt {
1585 1.1 matt uint32_t w0, add0, add1;
1586 1.1 matt uint32_t result;
1587 1.1 matt
1588 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_compute");
1589 1.1 matt add0 = ((uint32_t) eaddr[5] << 0) |
1590 1.1 matt ((uint32_t) eaddr[4] << 8) |
1591 1.1 matt ((uint32_t) eaddr[3] << 16);
1592 1.1 matt
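	/*
	 * The mask-and-shift steps below swap nibbles, then bit pairs, then
	 * adjacent bits, reversing the bit order within each byte of the
	 * 24-bit value; the same is done for add1 below.
	 */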
1593 1.1 matt add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
1594 1.1 matt add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
1595 1.1 matt add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);
1596 1.1 matt
1597 1.1 matt add1 = ((uint32_t) eaddr[2] << 0) |
1598 1.1 matt ((uint32_t) eaddr[1] << 8) |
1599 1.1 matt ((uint32_t) eaddr[0] << 16);
1600 1.1 matt
1601 1.1 matt add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
1602 1.1 matt add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
1603 1.1 matt add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);
1604 1.1 matt
1605 1.1 matt GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
1606 1.1 matt /*
1607 1.1 matt 	 * hashResult is the 15-bit hash entry address.
1608 1.1 matt 	 * ethernetADD is a 48-bit number derived from the Ethernet
1609 1.1 matt 	 * MAC address by nibble swapping every byte (i.e. a MAC address
1610 1.1 matt 	 * of 0x123456789abc translates to an ethernetADD of 0x21436587a9cb).
1611 1.1 matt */
1612 1.1 matt
1613 1.1 matt if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
1614 1.1 matt /*
1615 1.1 matt * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
1616 1.1 matt *
1617 1.1 matt * hashFunc0 calculates the hashResult in the following manner:
1618 1.1 matt * hashResult[ 8:0] = ethernetADD[14:8,1,0]
1619 1.1 matt * XOR ethernetADD[23:15] XOR ethernetADD[32:24]
1620 1.1 matt */
1621 1.1 matt result = (add0 & 3) | ((add0 >> 6) & ~3);
1622 1.1 matt result ^= (add0 >> 15) ^ (add1 >> 0);
1623 1.1 matt result &= 0x1ff;
1624 1.1 matt /*
1625 1.1 matt * hashResult[14:9] = ethernetADD[7:2]
1626 1.1 matt */
1627 1.1 matt result |= (add0 & ~3) << 7; /* excess bits will be masked */
1628 1.1 matt GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
1629 1.1 matt } else {
1630 1.1 matt #define	TRIBITFLIP	073516240	/* yes, it's in octal */
1631 1.1 matt /*
1632 1.1 matt * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
1633 1.1 matt *
1634 1.1 matt * hashFunc1 calculates the hashResult in the following manner:
1635 1.1 matt * hashResult[08:00] = ethernetADD[06:14]
1636 1.1 matt * XOR ethernetADD[15:23] XOR ethernetADD[24:32]
1637 1.1 matt */
1638 1.1 matt w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
1639 1.1 matt /*
1640 1.1 matt * Now bitswap those 9 bits
1641 1.1 matt */
1642 1.1 matt result = 0;
1643 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
1644 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
1645 1.1 matt result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;
1646 1.1 matt
1647 1.1 matt /*
1648 1.1 matt * hashResult[14:09] = ethernetADD[00:05]
1649 1.1 matt */
1650 1.1 matt result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
1651 1.1 matt result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
1652 1.1 matt GE_DPRINTF(sc, ("1(%#x)", result));
1653 1.1 matt }
1654 1.6 matt GE_FUNC_EXIT(sc, "");
1655 1.1 matt return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
1656 1.1 matt }
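
/*
 * Usage sketch (illustrative only, not new driver code): gfe_hash_entry_op()
 * below turns the return value into an index into the in-memory hash table:
 *
 *	hash = gfe_hash_compute(sc, eaddr);
 *	hash &= sc->sc_hashmask / sizeof(uint64_t);
 *	he_p = &sc->sc_hashtable[hash];
 *
 * i.e. the result is an entry index, clamped to the configured table size.
 */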
1657 1.1 matt
1658 1.1 matt int
1659 1.1 matt gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
1660 1.1 matt enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
1661 1.1 matt {
1662 1.1 matt uint64_t he;
1663 1.1 matt uint64_t *maybe_he_p = NULL;
1664 1.1 matt int limit;
1665 1.1 matt int hash;
1666 1.1 matt int maybe_hash = 0;
1667 1.29 cegger
1668 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_entry_op");
1669 1.1 matt
1670 1.1 matt hash = gfe_hash_compute(sc, eaddr);
1671 1.1 matt
1672 1.1 matt if (sc->sc_hashtable == NULL) {
1673 1.1 matt panic("%s:%d: hashtable == NULL!", device_xname(&sc->sc_dev),
1674 1.1 matt __LINE__);
1675 1.1 matt }
1676 1.1 matt
1677 1.1 matt /*
1678 1.1 matt * Assume we are going to insert so create the hash entry we
1679 1.1 matt * are going to insert. We also use it to match entries we
1680 1.1 matt * will be removing.
1681 1.1 matt */
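	/*
	 * In the entry built below, the MAC address occupies bits 3..50
	 * (eaddr[0] lowest), alongside the RX priority and the HSH_V and
	 * HSH_R flags; HSH_S, the skip bit, is what marks a deleted entry.
	 */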
1682 1.1 matt he = ((uint64_t) eaddr[5] << 43) |
1683 1.1 matt ((uint64_t) eaddr[4] << 35) |
1684 1.1 matt ((uint64_t) eaddr[3] << 27) |
1685 1.1 matt ((uint64_t) eaddr[2] << 19) |
1686 1.1 matt ((uint64_t) eaddr[1] << 11) |
1687 1.1 matt ((uint64_t) eaddr[0] << 3) |
1688 1.1 matt HSH_PRIO_INS(prio) | HSH_V | HSH_R;
1689 1.1 matt
1690 1.1 matt /*
1691 1.16 perry 	 * The GT will search up to 12 entries for a hit, so we must mimic that.
1692 1.1 matt */
1693 1.1 matt hash &= sc->sc_hashmask / sizeof(he);
1694 1.1 matt for (limit = HSH_LIMIT; limit > 0 ; --limit) {
1695 1.1 matt /*
1696 1.1 matt 		 * Does the GT wrap at the end, stop at the end, or overrun
1697 1.1 matt 		 * the end?  Assume it wraps for now.  Stash a copy of the
1698 1.1 matt * current hash entry.
1699 1.1 matt */
1700 1.1 matt uint64_t *he_p = &sc->sc_hashtable[hash];
1701 1.1 matt uint64_t thishe = *he_p;
1702 1.1 matt
1703 1.1 matt /*
1704 1.1 matt 		 * If the hash entry isn't valid, that breaks the chain and
1705 1.1 matt 		 * makes this entry a good candidate for reuse.
1706 1.1 matt */
1707 1.1 matt if ((thishe & HSH_V) == 0) {
1708 1.1 matt maybe_he_p = he_p;
1709 1.1 matt break;
1710 1.1 matt }
1711 1.1 matt
1712 1.1 matt /*
1713 1.1 matt * If the hash entry has the same address we are looking for
1714 1.1 matt 		 * then ... if we are removing and the skip bit is set, it's
1715 1.1 matt 		 * already been removed.  If we are adding and the skip bit is
1716 1.1 matt 		 * clear, then it's already added.  In either case return EBUSY
1717 1.1 matt 		 * indicating the op has already been done.  Otherwise flip
1718 1.1 matt * the skip bit and return 0.
1719 1.1 matt */
1720 1.2 matt if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
1721 1.2 matt if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
1722 1.1 matt ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
1723 1.1 matt return EBUSY;
1724 1.1 matt *he_p = thishe ^ HSH_S;
1725 1.1 matt bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1726 1.1 matt hash * sizeof(he), sizeof(he),
1727 1.1 matt BUS_DMASYNC_PREWRITE);
1728 1.1 matt GE_FUNC_EXIT(sc, "^");
1729 1.1 matt return 0;
1730 1.1 matt }
1731 1.1 matt
1732 1.1 matt /*
1733 1.1 matt * If we haven't found a slot for the entry and this entry
1734 1.16 perry 		 * is currently being skipped, remember it as a reuse candidate.
1735 1.1 matt */
1736 1.1 matt if (maybe_he_p == NULL && (thishe & HSH_S)) {
1737 1.1 matt maybe_he_p = he_p;
1738 1.1 matt maybe_hash = hash;
1739 1.1 matt }
1740 1.1 matt
1741 1.1 matt hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
1742 1.1 matt }
1743 1.1 matt
1744 1.1 matt /*
1745 1.1 matt * If we got here, then there was no entry to remove.
1746 1.1 matt */
1747 1.1 matt if (op == GE_HASH_REMOVE) {
1748 1.1 matt GE_FUNC_EXIT(sc, "?");
1749 1.1 matt return ENOENT;
1750 1.1 matt }
1751 1.1 matt
1752 1.1 matt /*
1753 1.1 matt * If we couldn't find a slot, return an error.
1754 1.1 matt */
1755 1.1 matt if (maybe_he_p == NULL) {
1756 1.1 matt GE_FUNC_EXIT(sc, "!");
1757 1.1 matt return ENOSPC;
1758 1.2 matt }
1759 1.1 matt
1760 1.1 matt 	/* Update the entry. */
1762 1.1 matt *maybe_he_p = he;
1763 1.1 matt bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1764 1.1 matt maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
1765 1.1 matt GE_FUNC_EXIT(sc, "+");
1766 1.1 matt return 0;
1767 1.1 matt }
1768 1.1 matt
1769 1.1 matt int
1770 1.1 matt gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd)
1771 1.1 matt {
1772 1.1 matt struct gfe_softc * const sc = ec->ec_if.if_softc;
1773 1.1 matt int error;
1774 1.1 matt enum gfe_hash_op op;
1775 1.1 matt enum gfe_rxprio prio;
1776 1.1 matt
1777 1.1 matt GE_FUNC_ENTER(sc, "hash_multichg");
1778 1.1 matt /*
1779 1.1 matt 	 * Is this a wildcard entry?  If so and it's being removed, recompute.
1780 1.1 matt */
1781 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1782 1.1 matt if (cmd == SIOCDELMULTI) {
1783 1.1 matt GE_FUNC_EXIT(sc, "");
1784 1.1 matt return ENETRESET;
1785 1.1 matt }
1786 1.1 matt
1787 1.1 matt /*
1788 1.1 matt 	 * Switch into all-multicast mode.
1789 1.1 matt */
1790 1.1 matt sc->sc_flags |= GE_ALLMULTI;
1791 1.1 matt if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
1792 1.1 matt sc->sc_pcr |= ETH_EPCR_PM;
1793 1.1 matt GE_WRITE(sc, EPCR, sc->sc_pcr);
1794 1.1 matt GE_FUNC_EXIT(sc, "");
1795 1.1 matt return 0;
1796 1.1 matt }
1797 1.1 matt GE_FUNC_EXIT(sc, "");
1798 1.1 matt return ENETRESET;
1799 1.1 matt }
1800 1.1 matt
1801 1.1 matt prio = GE_RXPRIO_MEDLO;
1802 1.1 matt op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);
1803 1.1 matt
1804 1.1 matt if (sc->sc_hashtable == NULL) {
1805 1.1 matt GE_FUNC_EXIT(sc, "");
1806 1.29 cegger return 0;
1807 1.1 matt }
1808 1.1 matt
1809 1.1 matt error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
1810 1.1 matt if (error == EBUSY) {
1811 1.1 matt printf("%s: multichg: tried to %s %s again\n",
1812 1.1 matt device_xname(&sc->sc_dev),
1813 1.1 matt cmd == SIOCDELMULTI ? "remove" : "add",
1814 1.1 matt ether_sprintf(enm->enm_addrlo));
1815 1.29 cegger GE_FUNC_EXIT(sc, "");
1816 1.1 matt return 0;
1817 1.1 matt }
1818 1.1 matt
1819 1.1 matt if (error == ENOENT) {
1820 1.1 matt printf("%s: multichg: failed to remove %s: not in table\n",
1821 1.1 matt device_xname(&sc->sc_dev),
1822 1.1 matt ether_sprintf(enm->enm_addrlo));
1823 1.29 cegger GE_FUNC_EXIT(sc, "");
1824 1.1 matt return 0;
1825 1.1 matt }
1826 1.1 matt
1827 1.1 matt if (error == ENOSPC) {
1828 1.1 matt printf("%s: multichg: failed to add %s: no space; regenerating table\n",
1829 1.29 cegger device_xname(&sc->sc_dev),
1830 1.1 matt ether_sprintf(enm->enm_addrlo));
1831 1.1 matt GE_FUNC_EXIT(sc, "");
1832 1.1 matt return ENETRESET;
1833 1.1 matt }
1834 1.1 matt GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
1835 1.1 matt device_xname(&sc->sc_dev),
1836 1.1 matt cmd == SIOCDELMULTI ? "remove" : "add",
1837 1.1 matt ether_sprintf(enm->enm_addrlo)));
1838 1.1 matt GE_FUNC_EXIT(sc, "");
1839 1.1 matt return 0;
1840 1.1 matt }
1841 1.1 matt
1842 1.1 matt int
1843 1.1 matt gfe_hash_fill(struct gfe_softc *sc)
1844 1.1 matt {
1845 1.1 matt struct ether_multistep step;
1846 1.24 dyoung struct ether_multi *enm;
1847 1.1 matt int error;
1848 1.1 matt
1849 1.1 matt GE_FUNC_ENTER(sc, "gfe_hash_fill");
1850 1.1 matt
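	/*
	 * Our own station address always gets a high priority entry;
	 * multicast addresses added below go in at GE_RXPRIO_MEDLO.
	 */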
1851 1.1 matt error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
1852 1.1 matt CLLADDR(sc->sc_ec.ec_if.if_sadl));
1853 1.1 matt 	if (error) {
1854 1.1 matt 		GE_FUNC_EXIT(sc, "!");
1855 1.1 matt 		return error;
1856 1.1 matt 	}
1857 1.1 matt sc->sc_flags &= ~GE_ALLMULTI;
1858 1.1 matt if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
1859 1.1 matt sc->sc_pcr &= ~ETH_EPCR_PM;
1860 1.1 matt ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
1861 1.1 matt while (enm != NULL) {
1862 1.1 matt if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1863 1.1 matt sc->sc_flags |= GE_ALLMULTI;
1864 1.1 matt sc->sc_pcr |= ETH_EPCR_PM;
1865 1.1 matt } else {
1866 1.1 matt error = gfe_hash_entry_op(sc, GE_HASH_ADD,
1867 1.1 matt GE_RXPRIO_MEDLO, enm->enm_addrlo);
1868 1.1 matt if (error == ENOSPC)
1869 1.1 matt break;
1870 1.1 matt }
1871 1.1 matt ETHER_NEXT_MULTI(step, enm);
1872 1.1 matt }
1873 1.1 matt
1874 1.1 matt GE_FUNC_EXIT(sc, "");
1875 1.1 matt return error;
1876 1.1 matt }
1877 1.1 matt
1878 1.2 matt int
1879 1.2 matt gfe_hash_alloc(struct gfe_softc *sc)
1880 1.1 matt {
1881 1.1 matt int error;
1882 1.29 cegger GE_FUNC_ENTER(sc, "gfe_hash_alloc");
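	/*
	 * The hash table is either 16KB or 256KB of 8-byte entries (2K or
	 * 32K entries), selected by ETH_EPCR_HS_512; sc_hashmask is a byte
	 * mask over that size.
	 */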
1883 1.1 matt sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1;
1884 1.1 matt error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1,
1885 1.1 matt BUS_DMA_NOCACHE);
1886 1.1 matt if (error) {
1887 1.1 matt printf("%s: failed to allocate %d bytes for hash table: %d\n",
1888 1.1 matt device_xname(&sc->sc_dev), sc->sc_hashmask + 1, error);
1889 1.2 matt GE_FUNC_EXIT(sc, "");
1890 1.1 matt return error;
1891 1.1 matt }
1892 1.1 matt sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva;
1893 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1);
1894 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1895 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE);
1896 GE_FUNC_EXIT(sc, "");
1897 return 0;
1898 }
1899