1 /* $NetBSD: if_gfe.c,v 1.29 2008/04/08 20:40:42 cegger Exp $ */
2
3 /*
4 * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed for the NetBSD Project by
18 * Allegro Networks, Inc., and Wasabi Systems, Inc.
19 * 4. The name of Allegro Networks, Inc. may not be used to endorse
20 * or promote products derived from this software without specific prior
21 * written permission.
22 * 5. The name of Wasabi Systems, Inc. may not be used to endorse
23 * or promote products derived from this software without specific prior
24 * written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND
27 * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
28 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
29 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC.
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * if_gfe.c -- GT ethernet MAC driver
42 */
43
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.29 2008/04/08 20:40:42 cegger Exp $");
46
47 #include "opt_inet.h"
48 #include "bpfilter.h"
49
50 #include <sys/param.h>
51 #include <sys/types.h>
52 #include <sys/inttypes.h>
53 #include <sys/queue.h>
54
55 #include <uvm/uvm_extern.h>
56
57 #include <sys/callout.h>
58 #include <sys/device.h>
59 #include <sys/errno.h>
60 #include <sys/ioctl.h>
61 #include <sys/mbuf.h>
62 #include <sys/socket.h>
63
64 #include <sys/bus.h>
65
66 #include <net/if.h>
67 #include <net/if_dl.h>
68 #include <net/if_ether.h>
69 #include <net/if_media.h>
70
71 #ifdef INET
72 #include <netinet/in.h>
73 #include <netinet/if_inarp.h>
74 #endif
75 #if NBPFILTER > 0
76 #include <net/bpf.h>
77 #endif
78
79 #include <dev/mii/miivar.h>
80
81 #include <dev/marvell/gtintrreg.h>
82 #include <dev/marvell/gtethreg.h>
83
84 #include <dev/marvell/gtvar.h>
85 #include <dev/marvell/if_gfevar.h>
86
87 #define GE_READ(sc, reg) \
88 bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
89 #define GE_WRITE(sc, reg, v) \
90 bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))
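/*
 * GE_READ/GE_WRITE access this MAC's register window (sc_memh), the
 * subregion of the GT register space mapped in gfe_attach(); the
 * ETH__ ## reg token-pasting turns a short register name into its
 * register-offset constant.
 */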
91
92 #define GE_DEBUG
93 #if 0
94 #define GE_NOHASH
95 #define GE_NORX
96 #endif
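/*
 * GE_DEBUG enables the GE_DPRINTF()/GE_FUNC_ENTER() trace output below
 * (only emitted when IFF_DEBUG is set on the interface).  Enabling the
 * block above compiles out the hash filter (GE_NOHASH) and the receive
 * path (GE_NORX); GE_NOTX, tested elsewhere, does the same for transmit.
 */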
97
98 #ifdef GE_DEBUG
99 #define GE_DPRINTF(sc, a) do \
100 if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
101 printf a; \
102 while (0)
103 #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func))
104 #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]"))
105 #else
106 #define GE_DPRINTF(sc, a) do { } while (0)
107 #define GE_FUNC_ENTER(sc, func) do { } while (0)
108 #define GE_FUNC_EXIT(sc, str) do { } while (0)
109 #endif
110 enum gfe_whack_op {
111 GE_WHACK_START, GE_WHACK_RESTART,
112 GE_WHACK_CHANGE, GE_WHACK_STOP
113 };
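/*
 * gfe_whack() below is the driver's single start/stop/restart path;
 * these ops select which transition it performs (see gfe_ifioctl()
 * and gfe_ifwatchdog()).
 */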
114
115 enum gfe_hash_op {
116 GE_HASH_ADD, GE_HASH_REMOVE,
117 };
118
119 #if 1
120 #define htogt32(a) htobe32(a)
121 #define gt32toh(a) be32toh(a)
122 #else
123 #define htogt32(a) htole32(a)
124 #define gt32toh(a) le32toh(a)
125 #endif
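/*
 * Descriptors are kept big-endian in memory here (the #if 1 branch);
 * the #else branch would presumably suit a GT core configured for
 * little-endian descriptor fetches.
 */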
126
127 #define GE_RXDSYNC(sc, rxq, n, ops) \
128 bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
129 (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
130 (ops))
131 #define GE_RXDPRESYNC(sc, rxq, n) \
132 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
133 #define GE_RXDPOSTSYNC(sc, rxq, n) \
134 GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
135
136 #define GE_TXDSYNC(sc, txq, n, ops) \
137 bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
138 (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
139 (ops))
140 #define GE_TXDPRESYNC(sc, txq, n) \
141 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
142 #define GE_TXDPOSTSYNC(sc, txq, n) \
143 GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)
144
145 #define STATIC
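/*
 * STATIC expands to nothing so these functions keep global linkage
 * (handy when poking at the driver from a kernel debugger); define it
 * to "static" to make them file-local again.
 */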
146
147 STATIC int gfe_match (struct device *, struct cfdata *, void *);
148 STATIC void gfe_attach (struct device *, struct device *, void *);
149
150 STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int,
151 size_t, int);
152 STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *);
153
154 STATIC int gfe_ifioctl (struct ifnet *, u_long, void *);
155 STATIC void gfe_ifstart (struct ifnet *);
156 STATIC void gfe_ifwatchdog (struct ifnet *);
157
158 STATIC int gfe_mii_read (struct device *, int, int);
159 STATIC void gfe_mii_write (struct device *, int, int, int);
160 STATIC void gfe_mii_statchg (struct device *);
161
162 STATIC void gfe_tick(void *arg);
163
164 STATIC void gfe_tx_restart(void *);
165 STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio);
166 STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t);
167 STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int);
168 STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio);
169 STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio);
170 STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op);
171
172 STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio);
173 STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio);
174 STATIC int gfe_rx_prime(struct gfe_softc *);
175 STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t);
176 STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio);
177 STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio);
178 STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op);
179
180 STATIC int gfe_intr(void *);
181
182 STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op);
183
184 STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]);
185 STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op,
186 enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]);
187 STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *,
188 u_long);
189 STATIC int gfe_hash_fill(struct gfe_softc *);
190 STATIC int gfe_hash_alloc(struct gfe_softc *);
191
192 /* Link up to the rest of the kernel */
193 CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
194 gfe_match, gfe_attach, NULL, NULL);
195
196 extern struct cfdriver gfe_cd;
197
198 int
199 gfe_match(struct device *parent, struct cfdata *cf, void *aux)
200 {
201 struct gt_softc *gt = (struct gt_softc *) parent;
202 struct gt_attach_args *ga = aux;
203 uint8_t enaddr[6];
204
205 if (!GT_ETHEROK(gt, ga, &gfe_cd))
206 return 0;
207
208 if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0)
209 return 0;
210
211 if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 &&
212 enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0)
213 return 0;
214
215 return 1;
216 }
217
218 /*
219 * Attach this instance, and then all the sub-devices
220 */
221 void
222 gfe_attach(struct device *parent, struct device *self, void *aux)
223 {
224 struct gt_attach_args * const ga = aux;
225 struct gt_softc * const gt = device_private(parent);
226 struct gfe_softc * const sc = device_private(self);
227 struct ifnet * const ifp = &sc->sc_ec.ec_if;
228 uint32_t data;
229 uint8_t enaddr[6];
230 int phyaddr;
231 uint32_t sdcr;
232 int error;
233
234 GT_ETHERFOUND(gt, ga);
235
236 sc->sc_gt_memt = ga->ga_memt;
237 sc->sc_gt_memh = ga->ga_memh;
238 sc->sc_dmat = ga->ga_dmat;
239 sc->sc_macno = ga->ga_unit;
240
241 if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
242 ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
243 aprint_error(": failed to map registers\n");
244 }
245
246 callout_init(&sc->sc_co, 0);
247
248 data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
249 phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);
250
251 gtget_macaddr(gt, sc->sc_macno, enaddr);
252
253 sc->sc_pcr = GE_READ(sc, EPCR);
254 sc->sc_pcxr = GE_READ(sc, EPCXR);
255 sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;
256
257 aprint_normal(": address %s", ether_sprintf(enaddr));
258
259 #if defined(DEBUG)
260 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
261 #endif
262
263 sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
264 if (device_cfdata(&sc->sc_dev)->cf_flags & 1) {
265 aprint_normal(", phy %d (rmii)", phyaddr);
266 sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
267 } else {
268 aprint_normal(", phy %d (mii)", phyaddr);
269 sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
270 }
271 if (device_cfdata(&sc->sc_dev)->cf_flags & 2)
272 sc->sc_flags |= GE_NOFREE;
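/*
 * Bits 15:14 of the port config extend register hold the maximum
 * frame length; clear them and select the 1536-byte limit, matching
 * sc_max_frame_length set further down.
 */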
273 sc->sc_pcxr &= ~(3 << 14);
274 sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);
275
276 if (sc->sc_pcr & ETH_EPCR_EN) {
277 int tries = 1000;
278 /*
279 * Abort transmitter and receiver and wait for them to quiesce
280 */
281 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
282 do {
283 delay(100);
284 } while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
285 }
286
287 sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);
288
289 #if defined(DEBUG)
290 aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
291 #endif
292
293 /*
294 * Now turn off the GT. If it didn't quiesce, too ***ing bad.
295 */
296 GE_WRITE(sc, EPCR, sc->sc_pcr);
297 GE_WRITE(sc, EIMR, sc->sc_intrmask);
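/*
 * Program the SDMA config register: a 4-word burst size and (what
 * appears to be) "receive interrupt on frame boundaries" (RIFB), so
 * the GT only interrupts once per received frame.
 */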
298 sdcr = GE_READ(sc, ESDCR);
299 ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
300 sdcr |= ETH_ESDCR_RIFB;
301 GE_WRITE(sc, ESDCR, sdcr);
302 sc->sc_max_frame_length = 1536;
303
304 aprint_normal("\n");
305 sc->sc_mii.mii_ifp = ifp;
306 sc->sc_mii.mii_readreg = gfe_mii_read;
307 sc->sc_mii.mii_writereg = gfe_mii_write;
308 sc->sc_mii.mii_statchg = gfe_mii_statchg;
309
310 sc->sc_ec.ec_mii = &sc->sc_mii;
311 ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
312 ether_mediastatus);
313
314 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
315 MII_OFFSET_ANY, MIIF_NOISOLATE);
316 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
317 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
318 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
319 } else {
320 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
321 }
322
323 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
324 ifp->if_softc = sc;
325 /* ifp->if_mowner = &sc->sc_mowner; */
326 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
327 #if 0
328 ifp->if_flags |= IFF_DEBUG;
329 #endif
330 ifp->if_ioctl = gfe_ifioctl;
331 ifp->if_start = gfe_ifstart;
332 ifp->if_watchdog = gfe_ifwatchdog;
333
334 if (sc->sc_flags & GE_NOFREE) {
335 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
336 if (!error)
337 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
338 if (!error)
339 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
340 if (!error)
341 error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
342 if (!error)
343 error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
344 if (!error)
345 error = gfe_hash_alloc(sc);
346 if (error)
347 aprint_error(
348 "%s: failed to allocate resources: %d\n",
349 ifp->if_xname, error);
350 }
351
352 if_attach(ifp);
353 ether_ifattach(ifp, enaddr);
354 #if NBPFILTER > 0
355 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
356 #endif
357 #if NRND > 0
358 rnd_attach_source(&sc->sc_rnd_source, device_xname(self), RND_TYPE_NET, 0);
359 #endif
360 intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
361 gfe_intr, sc);
362 }
363
364 int
365 gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs,
366 size_t size, int flags)
367 {
368 int error = 0;
369 GE_FUNC_ENTER(sc, "gfe_dmamem_alloc");
370
371 KASSERT(gdm->gdm_kva == NULL);
372 gdm->gdm_size = size;
373 gdm->gdm_maxsegs = maxsegs;
374
375 error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE,
376 gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs,
377 BUS_DMA_NOWAIT);
378 if (error)
379 goto fail;
380
381 error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs,
382 gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT);
383 if (error)
384 goto fail;
385
386 error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs,
387 gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map);
388 if (error)
389 goto fail;
390
391 error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva,
392 gdm->gdm_size, NULL, BUS_DMA_NOWAIT);
393 if (error)
394 goto fail;
395
396 /* invalidate from cache */
397 bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size,
398 BUS_DMASYNC_PREREAD);
399 fail:
400 if (error) {
401 gfe_dmamem_free(sc, gdm);
402 GE_DPRINTF(sc, (":err=%d", error));
403 }
404 GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%x/%x",
405 gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs,
406 gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len));
407 GE_FUNC_EXIT(sc, "");
408 return error;
409 }
410
411 void
412 gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
413 {
414 GE_FUNC_ENTER(sc, "gfe_dmamem_free");
415 if (gdm->gdm_map)
416 bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
417 if (gdm->gdm_kva)
418 bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
419 if (gdm->gdm_nsegs > 0)
420 bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
421 gdm->gdm_map = NULL;
422 gdm->gdm_kva = NULL;
423 gdm->gdm_nsegs = 0;
424 GE_FUNC_EXIT(sc, "");
425 }
426
427 int
428 gfe_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
429 {
430 struct gfe_softc * const sc = ifp->if_softc;
431 struct ifreq *ifr = (struct ifreq *) data;
432 struct ifaddr *ifa = (struct ifaddr *) data;
433 int s, error = 0;
434
435 GE_FUNC_ENTER(sc, "gfe_ifioctl");
436 s = splnet();
437
438 switch (cmd) {
439 case SIOCSIFADDR:
440 ifp->if_flags |= IFF_UP;
441 switch (ifa->ifa_addr->sa_family) {
442 #ifdef INET
443 case AF_INET:
444 error = gfe_whack(sc, GE_WHACK_START);
445 if (error == 0)
446 arp_ifinit(ifp, ifa);
447 break;
448 #endif
449 default:
450 error = gfe_whack(sc, GE_WHACK_START);
451 break;
452 }
453 break;
454
455 case SIOCSIFFLAGS:
456 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
457 case IFF_UP|IFF_RUNNING:/* active->active, update */
458 error = gfe_whack(sc, GE_WHACK_CHANGE);
459 break;
460 case IFF_RUNNING: /* not up, so we stop */
461 error = gfe_whack(sc, GE_WHACK_STOP);
462 break;
463 case IFF_UP: /* not running, so we start */
464 error = gfe_whack(sc, GE_WHACK_START);
465 break;
466 case 0: /* idle->idle: do nothing */
467 break;
468 }
469 break;
470
471 case SIOCSIFMEDIA:
472 case SIOCGIFMEDIA:
473 case SIOCADDMULTI:
474 case SIOCDELMULTI:
475 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
476 if (ifp->if_flags & IFF_RUNNING)
477 error = gfe_whack(sc, GE_WHACK_CHANGE);
478 else
479 error = 0;
480 }
481 break;
482
483 case SIOCSIFMTU:
484 if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
485 error = EINVAL;
486 break;
487 }
488 if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
489 error = 0;
490 break;
491
492 default:
493 error = EINVAL;
494 break;
495 }
496 splx(s);
497 GE_FUNC_EXIT(sc, "");
498 return error;
499 }
500
501 void
502 gfe_ifstart(struct ifnet *ifp)
503 {
504 struct gfe_softc * const sc = ifp->if_softc;
505 struct mbuf *m;
506
507 GE_FUNC_ENTER(sc, "gfe_ifstart");
508
509 if ((ifp->if_flags & IFF_RUNNING) == 0) {
510 GE_FUNC_EXIT(sc, "$");
511 return;
512 }
513
514 for (;;) {
515 IF_DEQUEUE(&ifp->if_snd, m);
516 if (m == NULL) {
517 ifp->if_flags &= ~IFF_OACTIVE;
518 GE_FUNC_EXIT(sc, "");
519 return;
520 }
521
522 /*
523 * No space in the pending queue? Try again later.
524 */
525 if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
526 break;
527
528 /*
529 * Queue the mbuf for the device. If gfe_tx_enqueue() can't take
530 * it right now, it will be retried later.
531 */
532 IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
533 GE_DPRINTF(sc, (">"));
534 #ifndef GE_NOTX
535 (void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
536 #endif
537 }
538
539 /*
540 * The pending queue is full; put the mbuf back and mark the interface as busy.
541 */
542 IF_PREPEND(&ifp->if_snd, m);
543 ifp->if_flags |= IFF_OACTIVE;
544 GE_FUNC_EXIT(sc, "%%");
545 }
546
547 void
548 gfe_ifwatchdog(struct ifnet *ifp)
549 {
550 struct gfe_softc * const sc = ifp->if_softc;
551 struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];
552
553 GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
554 printf("%s: device timeout", device_xname(&sc->sc_dev));
555 if (ifp->if_flags & IFF_RUNNING) {
556 uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]);
557 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
558 GE_TXDPOSTSYNC(sc, txq, curtxdnum);
559 printf(" (fi=%d(%#x),lo=%d,cur=%d(%#x),icm=%#x) ",
560 txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
561 txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
562 GE_READ(sc, EICR));
563 GE_TXDPRESYNC(sc, txq, txq->txq_fi);
564 GE_TXDPRESYNC(sc, txq, curtxdnum);
565 }
566 printf("\n");
567 ifp->if_oerrors++;
568 (void) gfe_whack(sc, GE_WHACK_RESTART);
569 GE_FUNC_EXIT(sc, "");
570 }
571
572 int
574 gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
575 {
576 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
577 int error;
578
579 GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
580 GE_DPRINTF(sc, ("(%d)", rxprio));
581
582 error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
583 GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
584 if (error) {
585 GE_FUNC_EXIT(sc, "!!");
586 return error;
587 }
588
589 error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
590 GE_RXBUF_MEMSIZE, 0);
591 if (error) {
592 GE_FUNC_EXIT(sc, "!!!");
593 return error;
594 }
595 GE_FUNC_EXIT(sc, "");
596 return error;
597 }
598
599 int
600 gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
601 {
602 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
603 volatile struct gt_eth_desc *rxd;
604 const bus_dma_segment_t *ds;
605 int idx;
606 bus_addr_t nxtaddr;
607 bus_size_t boff;
608
609 GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
610 GE_DPRINTF(sc, ("(%d)", rxprio));
611
612 if ((sc->sc_flags & GE_NOFREE) == 0) {
613 int error = gfe_rx_rxqalloc(sc, rxprio);
614 if (error) {
615 GE_FUNC_EXIT(sc, "!");
616 return error;
617 }
618 } else {
619 KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
620 KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
621 }
622
623 memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);
624
625 rxq->rxq_descs =
626 (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
627 rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
628 rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
629 rxq->rxq_fi = 0;
630 rxq->rxq_active = GE_RXDESC_MAX;
631 for (idx = 0, rxd = rxq->rxq_descs,
632 boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
633 nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
634 idx < GE_RXDESC_MAX;
635 idx++, rxd++, nxtaddr += sizeof(*rxd)) {
636 rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
637 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
638 rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
639 /*
640 * update the nxtptr to point to the next rxd.
641 */
642 if (idx == GE_RXDESC_MAX - 1)
643 nxtaddr = rxq->rxq_desc_busaddr;
644 rxd->ed_nxtptr = htogt32(nxtaddr);
645 boff += GE_RXBUF_SIZE;
646 if (boff == ds->ds_len) {
647 ds++;
648 boff = 0;
649 }
650 }
651 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
652 rxq->rxq_desc_mem.gdm_map->dm_mapsize,
653 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
654 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
655 rxq->rxq_buf_mem.gdm_map->dm_mapsize,
656 BUS_DMASYNC_PREREAD);
657
658 rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
659 switch (rxprio) {
660 case GE_RXPRIO_HI:
661 rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
662 rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
663 rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
664 break;
665 case GE_RXPRIO_MEDHI:
666 rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
667 rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
668 rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
669 break;
670 case GE_RXPRIO_MEDLO:
671 rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
672 rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
673 rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
674 break;
675 case GE_RXPRIO_LO:
676 rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
677 rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
678 rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
679 break;
680 }
681 GE_FUNC_EXIT(sc, "");
682 return 0;
683 }
684
685 void
686 gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
687 {
688 struct ifnet * const ifp = &sc->sc_ec.ec_if;
689 struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
690 struct mbuf *m = rxq->rxq_curpkt;
691
692 GE_FUNC_ENTER(sc, "gfe_rx_get");
693 GE_DPRINTF(sc, ("(%d)", rxprio));
694
695 while (rxq->rxq_active > 0) {
696 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
697 struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
698 const struct ether_header *eh;
699 unsigned int cmdsts;
700 size_t buflen;
701
702 GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
703 cmdsts = gt32toh(rxd->ed_cmdsts);
704 GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
705 rxq->rxq_cmdsts = cmdsts;
706 /*
707 * Sometimes the GE "forgets" to reset the ownership bit.
708 * But if the length has been rewritten, the packet is ours
709 * so pretend the O bit has been cleared.
710 */
711 buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
712 if ((cmdsts & RX_CMD_O) && buflen == 0) {
713 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
714 break;
715 }
716
717 /*
718 * If this is not a single buffer packet with no errors
719 * or for some reason it's bigger than our frame size,
720 * ignore it and go to the next packet.
721 */
722 if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
723 (RX_CMD_F|RX_CMD_L) ||
724 buflen > sc->sc_max_frame_length) {
725 GE_DPRINTF(sc, ("!"));
726 --rxq->rxq_active;
727 ifp->if_ipackets++;
728 ifp->if_ierrors++;
729 goto give_it_back;
730 }
731
732 /* CRC is included with the packet; trim it off. */
733 buflen -= ETHER_CRC_LEN;
734
735 if (m == NULL) {
736 MGETHDR(m, M_DONTWAIT, MT_DATA);
737 if (m == NULL) {
738 GE_DPRINTF(sc, ("?"));
739 break;
740 }
741 }
742 if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
743 MCLGET(m, M_DONTWAIT);
744 if ((m->m_flags & M_EXT) == 0) {
745 GE_DPRINTF(sc, ("?"));
746 break;
747 }
748 }
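/*
 * Offset the payload by two bytes so the IP header following the
 * 14-byte Ethernet header ends up 32-bit aligned (the usual
 * ETHER_ALIGN trick; the MHLEN - 2 test above allows for it).
 */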
749 m->m_data += 2;
750 m->m_len = 0;
751 m->m_pkthdr.len = 0;
752 m->m_pkthdr.rcvif = ifp;
753 rxq->rxq_cmdsts = cmdsts;
754 --rxq->rxq_active;
755
756 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
757 rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);
758
759 KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
760 memcpy(m->m_data + m->m_len, rxb->rb_data, buflen);
761 m->m_len = buflen;
762 m->m_pkthdr.len = buflen;
763
764 ifp->if_ipackets++;
765 #if NBPFILTER > 0
766 if (ifp->if_bpf != NULL)
767 bpf_mtap(ifp->if_bpf, m);
768 #endif
769
770 eh = (const struct ether_header *) m->m_data;
771 if ((ifp->if_flags & IFF_PROMISC) ||
772 (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
773 (rxq->rxq_cmdsts & RX_STS_HE) ||
774 (eh->ether_dhost[0] & 1) != 0 ||
775 memcmp(eh->ether_dhost, CLLADDR(ifp->if_sadl),
776 ETHER_ADDR_LEN) == 0) {
777 (*ifp->if_input)(ifp, m);
778 m = NULL;
779 GE_DPRINTF(sc, (">"));
780 } else {
781 m->m_len = 0;
782 m->m_pkthdr.len = 0;
783 GE_DPRINTF(sc, ("+"));
784 }
785 rxq->rxq_cmdsts = 0;
786
787 give_it_back:
788 rxd->ed_lencnt &= ~0xffff; /* zero out length */
789 rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
790 #if 0
791 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
792 rxq->rxq_fi,
793 ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
794 ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
795 #endif
796 GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
797 if (++rxq->rxq_fi == GE_RXDESC_MAX)
798 rxq->rxq_fi = 0;
799 rxq->rxq_active++;
800 }
801 rxq->rxq_curpkt = m;
802 GE_FUNC_EXIT(sc, "");
803 }
804
805 uint32_t
806 gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
807 {
808 struct ifnet * const ifp = &sc->sc_ec.ec_if;
809 struct gfe_rxqueue *rxq;
810 uint32_t rxbits;
811 #define RXPRIO_DECODER 0xffffaa50
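/*
 * RXPRIO_DECODER is a packed lookup table, two bits per possible value
 * of the 4-bit rxbits mask: (RXPRIO_DECODER >> (rxbits * 2)) & 3 yields
 * the number of the highest pending queue, so queue 3 (GE_RXPRIO_HI) is
 * always serviced before the lower-priority queues.  For example,
 * rxbits == 0x5 (queues 0 and 2 pending) decodes to 2.
 */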
812 GE_FUNC_ENTER(sc, "gfe_rx_process");
813
814 rxbits = ETH_IR_RxBuffer_GET(cause);
815 while (rxbits) {
816 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
817 GE_DPRINTF(sc, ("%1x", rxbits));
818 rxbits &= ~(1 << rxprio);
819 gfe_rx_get(sc, rxprio);
820 }
821
822 rxbits = ETH_IR_RxError_GET(cause);
823 while (rxbits) {
824 enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
825 uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
826 int idx;
827 rxbits &= ~(1 << rxprio);
828 rxq = &sc->sc_rxq[rxprio];
829 sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
830 intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
831 if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
832 sc->sc_tickflags |= GE_TICK_RX_RESTART;
833 callout_reset(&sc->sc_co, 1, gfe_tick, sc);
834 }
835 ifp->if_ierrors++;
836 GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
837 device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi));
838 memset(masks, 0, sizeof(masks));
839 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
840 0, rxq->rxq_desc_mem.gdm_size,
841 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
842 for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
843 volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];
844
845 if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
846 masks[idx/32] |= 1 << (idx & 31);
847 }
848 bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
849 0, rxq->rxq_desc_mem.gdm_size,
850 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
851 #if defined(DEBUG)
852 printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
853 device_xname(&sc->sc_dev), rxprio, rxq->rxq_fi,
854 rxq->rxq_cmdsts, masks[0], masks[1]);
855 #endif
856 }
857 if ((intrmask & ETH_IR_RxBits) == 0)
858 intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);
859
860 GE_FUNC_EXIT(sc, "");
861 return intrmask;
862 }
863
864 int
865 gfe_rx_prime(struct gfe_softc *sc)
866 {
867 struct gfe_rxqueue *rxq;
868 int error;
869
870 GE_FUNC_ENTER(sc, "gfe_rx_prime");
871
872 error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
873 if (error)
874 goto bail;
875 rxq = &sc->sc_rxq[GE_RXPRIO_HI];
876 if ((sc->sc_flags & GE_RXACTIVE) == 0) {
877 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
878 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
879 }
880 sc->sc_intrmask |= rxq->rxq_intrbits;
881
882 error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
883 if (error)
884 goto bail;
885 if ((sc->sc_flags & GE_RXACTIVE) == 0) {
886 rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
887 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
888 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
889 sc->sc_intrmask |= rxq->rxq_intrbits;
890 }
891
892 error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
893 if (error)
894 goto bail;
895 if ((sc->sc_flags & GE_RXACTIVE) == 0) {
896 rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
897 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
898 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
899 sc->sc_intrmask |= rxq->rxq_intrbits;
900 }
901
902 error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
903 if (error)
904 goto bail;
905 if ((sc->sc_flags & GE_RXACTIVE) == 0) {
906 rxq = &sc->sc_rxq[GE_RXPRIO_LO];
907 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
908 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
909 sc->sc_intrmask |= rxq->rxq_intrbits;
910 }
911
912 bail:
913 GE_FUNC_EXIT(sc, "");
914 return error;
915 }
916
917 void
918 gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
919 {
920 struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
921 GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
922 if (rxq == NULL) {
923 GE_FUNC_EXIT(sc, "");
924 return;
925 }
926
927 if (rxq->rxq_curpkt)
928 m_freem(rxq->rxq_curpkt);
929 if ((sc->sc_flags & GE_NOFREE) == 0) {
930 gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
931 gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
932 }
933 GE_FUNC_EXIT(sc, "");
934 }
935
936 void
937 gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
938 {
939 GE_FUNC_ENTER(sc, "gfe_rx_stop");
940 sc->sc_flags &= ~GE_RXACTIVE;
941 sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
942 sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
943 GE_WRITE(sc, EIMR, sc->sc_intrmask);
944 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
945 do {
946 delay(10);
947 } while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
948 gfe_rx_cleanup(sc, GE_RXPRIO_HI);
949 gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
950 gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
951 gfe_rx_cleanup(sc, GE_RXPRIO_LO);
952 GE_FUNC_EXIT(sc, "");
953 }
954
955 void
957 gfe_tick(void *arg)
958 {
959 struct gfe_softc * const sc = arg;
960 uint32_t intrmask;
961 unsigned int tickflags;
962 int s;
963
964 GE_FUNC_ENTER(sc, "gfe_tick");
965
966 s = splnet();
967
968 tickflags = sc->sc_tickflags;
969 sc->sc_tickflags = 0;
970 intrmask = sc->sc_intrmask;
971 if (tickflags & GE_TICK_TX_IFSTART)
972 gfe_ifstart(&sc->sc_ec.ec_if);
973 if (tickflags & GE_TICK_RX_RESTART) {
974 intrmask |= sc->sc_idlemask;
975 if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
976 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
977 rxq->rxq_fi = 0;
978 GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
979 GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
980 }
981 if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
982 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
983 rxq->rxq_fi = 0;
984 GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
985 GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
986 }
987 if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
988 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
989 rxq->rxq_fi = 0;
990 GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
991 GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
992 }
993 if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
994 struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
995 rxq->rxq_fi = 0;
996 GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
997 GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
998 }
999 sc->sc_idlemask = 0;
1000 }
1001 if (intrmask != sc->sc_intrmask) {
1002 sc->sc_intrmask = intrmask;
1003 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1004 }
1005 gfe_intr(sc);
1006 splx(s);
1007
1008 GE_FUNC_EXIT(sc, "");
1009 }
1010
1011 int
1012 gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
1013 {
1014 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
1015 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1016 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1017 volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
1018 uint32_t intrmask = sc->sc_intrmask;
1019 size_t buflen;
1020 struct mbuf *m;
1021
1022 GE_FUNC_ENTER(sc, "gfe_tx_enqueue");
1023
1024 /*
1025 * Anything in the pending queue to enqueue? If not, punt. Likewise
1026 * punt if the txq has not been set up yet; otherwise peek at the
1027 * first pending mbuf.
1028 */
1029 if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
1030 GE_FUNC_EXIT(sc, "-");
1031 return 0;
1032 }
1033
1034 /*
1035 * Have we [over]consumed our limit of descriptors?
1036 * Do we have enough free descriptors?
1037 */
1038 if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
1039 volatile struct gt_eth_desc * const txd2 = &txq->txq_descs[txq->txq_fi];
1040 uint32_t cmdsts;
1041 size_t pktlen;
1042 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
1043 cmdsts = gt32toh(txd2->ed_cmdsts);
1044 if (cmdsts & TX_CMD_O) {
1045 int nextin;
1046 /*
1047 * Sometimes the Discovery forgets to update the
1048 * last descriptor. See if we own the descriptor
1049 * after it (since we know we've turned that one over
1050 * to the Discovery and if we own it now, the Discovery
1051 * gave it back). If we do, we know the Discovery
1052 * gave back this one but forgot to mark it as ours.
1053 */
1054 nextin = txq->txq_fi + 1;
1055 if (nextin == GE_TXDESC_MAX)
1056 nextin = 0;
1057 GE_TXDPOSTSYNC(sc, txq, nextin);
1058 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
1059 GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1060 GE_TXDPRESYNC(sc, txq, nextin);
1061 GE_FUNC_EXIT(sc, "@");
1062 return 0;
1063 }
1064 #ifdef DEBUG
1065 printf("%s: txenqueue: transmitter resynced at %d\n",
1066 device_xname(&sc->sc_dev), txq->txq_fi);
1067 #endif
1068 }
1069 if (++txq->txq_fi == GE_TXDESC_MAX)
1070 txq->txq_fi = 0;
1071 txq->txq_inptr = gt32toh(txd2->ed_bufptr) - txq->txq_buf_busaddr;
1072 pktlen = (gt32toh(txd2->ed_lencnt) >> 16) & 0xffff;
1073 txq->txq_inptr += roundup(pktlen, dcache_line_size);
1074 txq->txq_nactive--;
1075
1076 /* statistics */
1077 ifp->if_opackets++;
1078 if (cmdsts & TX_STS_ES)
1079 ifp->if_oerrors++;
1080 GE_DPRINTF(sc, ("%%"));
1081 }
1082
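/*
 * The packet will be copied into the transmit bounce buffer; round its
 * length up to a data-cache-line multiple so no two in-flight packets
 * share a cache line (the buffer, unlike the descriptors, is allocated
 * cacheable).
 */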
1083 buflen = roundup(m->m_pkthdr.len, dcache_line_size);
1084
1085 /*
1086 * If this packet would wrap around the end of the buffer, reset back
1087 * to the beginning.
1088 */
1089 if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
1090 txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
1091 txq->txq_outptr = 0;
1092 }
1093
1094 /*
1095 * Make sure the output packet doesn't run over the beginning of
1096 * what we've already given the GT.
1097 */
1098 if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
1099 txq->txq_outptr + buflen > txq->txq_inptr) {
1100 intrmask |= txq->txq_intrbits &
1101 (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
1102 if (sc->sc_intrmask != intrmask) {
1103 sc->sc_intrmask = intrmask;
1104 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1105 }
1106 GE_FUNC_EXIT(sc, "#");
1107 return 0;
1108 }
1109
1110 /*
1111 * The end-of-list descriptor we put on last time is the starting point
1112 * for this packet. The GT is supposed to terminate list processing on
1113 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
1114 * must terminate the list.
1115 */
1116 intrmask = sc->sc_intrmask;
1117
1118 m_copydata(m, 0, m->m_pkthdr.len,
1119 (char *)txq->txq_buf_mem.gdm_kva + (int)txq->txq_outptr);
1120 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
1121 txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
1122 txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
1123 txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
1124 GE_TXDPRESYNC(sc, txq, txq->txq_lo);
1125
1126 /*
1127 * Request a buffer interrupt every 2/3 of the way thru the transmit
1128 * buffer.
1129 */
1130 txq->txq_ei_gapcount += buflen;
1131 if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
1132 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
1133 txq->txq_ei_gapcount = 0;
1134 } else {
1135 txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
1136 }
1137 #if 0
1138 GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
1139 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
1140 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
1141 #endif
1142 GE_TXDPRESYNC(sc, txq, txq->txq_lo);
1143
1144 txq->txq_outptr += buflen;
1145 /*
1146 * Tell the SDMA engine to "Fetch!"
1147 */
1148 GE_WRITE(sc, ESDCMR,
1149 txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));
1150
1151 GE_DPRINTF(sc, ("(%d)", txq->txq_lo));
1152
1153 /*
1154 * Update the last out appropriately.
1155 */
1156 txq->txq_nactive++;
1157 if (++txq->txq_lo == GE_TXDESC_MAX)
1158 txq->txq_lo = 0;
1159
1160 /*
1161 * The packet has been copied out; remove the mbuf from the pending queue and free it.
1162 */
1163 IF_DEQUEUE(&txq->txq_pendq, m);
1164 #if NBPFILTER > 0
1165 if (ifp->if_bpf != NULL)
1166 bpf_mtap(ifp->if_bpf, m);
1167 #endif
1168 m_freem(m);
1169 ifp->if_flags &= ~IFF_OACTIVE;
1170
1171 /*
1172 * Since we have put an item into the packet queue, we now want
1173 * an interrupt when the transmit queue finishes processing the
1174 * list. But only update the mask if needs changing.
1175 */
1176 intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
1177 if (sc->sc_intrmask != intrmask) {
1178 sc->sc_intrmask = intrmask;
1179 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1180 }
1181 if (ifp->if_timer == 0)
1182 ifp->if_timer = 5;
1183 GE_FUNC_EXIT(sc, "*");
1184 return 1;
1185 }
1186
1187 uint32_t
1188 gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask)
1189 {
1190 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1191 struct ifnet * const ifp = &sc->sc_ec.ec_if;
1192
1193 GE_FUNC_ENTER(sc, "gfe_tx_done");
1194
1195 if (txq == NULL) {
1196 GE_FUNC_EXIT(sc, "");
1197 return intrmask;
1198 }
1199
1200 while (txq->txq_nactive > 0) {
1201 const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
1202 volatile struct gt_eth_desc *txd = &txq->txq_descs[txq->txq_fi];
1203 uint32_t cmdsts;
1204 size_t pktlen;
1205
1206 GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
1207 if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
1208 int nextin;
1209
1210 if (txq->txq_nactive == 1) {
1211 GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1212 GE_FUNC_EXIT(sc, "");
1213 return intrmask;
1214 }
1215 /*
1216 * Sometimes the Discovery forgets to update the
1217 * ownership bit in the descriptor. See if we own the
1218 * descriptor after it (since we know we've turned that
1219 * one over to the Discovery and if we own it now then the
1220 * Discovery gave it back). If we do, we know the
1221 * Discovery gave back this one but forgot to mark it
1222 * as ours.
1223 */
1224 nextin = txq->txq_fi + 1;
1225 if (nextin == GE_TXDESC_MAX)
1226 nextin = 0;
1227 GE_TXDPOSTSYNC(sc, txq, nextin);
1228 if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
1229 GE_TXDPRESYNC(sc, txq, txq->txq_fi);
1230 GE_TXDPRESYNC(sc, txq, nextin);
1231 GE_FUNC_EXIT(sc, "");
1232 return intrmask;
1233 }
1234 #ifdef DEBUG
1235 printf("%s: txdone: transmitter resynced at %d\n",
1236 device_xname(&sc->sc_dev), txq->txq_fi);
1237 #endif
1238 }
1239 #if 0
1240 GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
1241 txq->txq_lo,
1242 ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
1243 ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
1244 #endif
1245 GE_DPRINTF(sc, ("(%d)", txq->txq_fi));
1246 if (++txq->txq_fi == GE_TXDESC_MAX)
1247 txq->txq_fi = 0;
1248 txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
1249 pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
1250 bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
1251 txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
1252 txq->txq_inptr += roundup(pktlen, dcache_line_size);
1253
1254 /* statistics */
1255 ifp->if_opackets++;
1256 if (cmdsts & TX_STS_ES)
1257 ifp->if_oerrors++;
1258
1259 /* txd->ed_bufptr = 0; */
1260
1261 ifp->if_timer = 5;
1262 --txq->txq_nactive;
1263 }
1264 if (txq->txq_nactive != 0)
1265 panic("%s: transmit fifo%d empty but active count (%d) > 0!",
1266 device_xname(&sc->sc_dev), txprio, txq->txq_nactive);
1267 ifp->if_timer = 0;
1268 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow));
1269 intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow));
1270 GE_FUNC_EXIT(sc, "");
1271 return intrmask;
1272 }
1273
1274 int
1275 gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio)
1276 {
1277 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1278 int error;
1279
1280 GE_FUNC_ENTER(sc, "gfe_tx_txqalloc");
1281
1282 error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1,
1283 GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE);
1284 if (error) {
1285 GE_FUNC_EXIT(sc, "");
1286 return error;
1287 }
1288 error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0);
1289 if (error) {
1290 gfe_dmamem_free(sc, &txq->txq_desc_mem);
1291 GE_FUNC_EXIT(sc, "");
1292 return error;
1293 }
1294 GE_FUNC_EXIT(sc, "");
1295 return 0;
1296 }
1297
1298 int
1299 gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio)
1300 {
1301 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1302 volatile struct gt_eth_desc *txd;
1303 unsigned int i;
1304 bus_addr_t addr;
1305
1306 GE_FUNC_ENTER(sc, "gfe_tx_start");
1307
1308 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
1309 ETH_IR_TxEndLow |ETH_IR_TxBufferLow);
1310
1311 if (sc->sc_flags & GE_NOFREE) {
1312 KASSERT(txq->txq_desc_mem.gdm_kva != NULL);
1313 KASSERT(txq->txq_buf_mem.gdm_kva != NULL);
1314 } else {
1315 int error = gfe_tx_txqalloc(sc, txprio);
1316 if (error) {
1317 GE_FUNC_EXIT(sc, "!");
1318 return error;
1319 }
1320 }
1321
1322 txq->txq_descs =
1323 (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva;
1324 txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr;
1325 txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr;
1326
1327 txq->txq_pendq.ifq_maxlen = 10;
1328 txq->txq_ei_gapcount = 0;
1329 txq->txq_nactive = 0;
1330 txq->txq_fi = 0;
1331 txq->txq_lo = 0;
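/*
 * The transmit bounce buffer is managed as a byte ring: txq_outptr is
 * where the next packet will be copied and txq_inptr tracks the start
 * of the oldest packet still owned by the GT (advanced as gfe_tx_done()
 * reclaims descriptors); gfe_tx_enqueue() refuses to let the two overlap.
 */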
1332 txq->txq_inptr = GE_TXBUF_SIZE;
1333 txq->txq_outptr = 0;
1334 for (i = 0, txd = txq->txq_descs,
1335 addr = txq->txq_desc_busaddr + sizeof(*txd);
1336 i < GE_TXDESC_MAX - 1;
1337 i++, txd++, addr += sizeof(*txd)) {
1338 /*
1339 * update the nxtptr to point to the next txd.
1340 */
1341 txd->ed_cmdsts = 0;
1342 txd->ed_nxtptr = htogt32(addr);
1343 }
1344 txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr =
1345 htogt32(txq->txq_desc_busaddr);
1346 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0,
1347 GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1348
1349 switch (txprio) {
1350 case GE_TXPRIO_HI:
1351 txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh;
1352 txq->txq_esdcmrbits = ETH_ESDCMR_TXDH;
1353 txq->txq_epsrbits = ETH_EPSR_TxHigh;
1354 txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno);
1355 GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr);
1356 break;
1357
1358 case GE_TXPRIO_LO:
1359 txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow;
1360 txq->txq_esdcmrbits = ETH_ESDCMR_TXDL;
1361 txq->txq_epsrbits = ETH_EPSR_TxLow;
1362 txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno);
1363 GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr);
1364 break;
1365
1366 case GE_TXPRIO_NONE:
1367 break;
1368 }
1369 #if 0
1370 GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp));
1371 gt_write(device_parent(&sc->sc_dev), txq->txq_ectdp,
1372 txq->txq_desc_busaddr);
1373 GE_DPRINTF(sc, (")"));
1374 #endif
1375
1376 /*
1377 * If we are restarting, there may be packets in the pending queue
1378 * waiting to be enqueued. Keep enqueuing packets from this queue
1379 * until the pending queue is empty or there is no room for them
1380 * on the device.
1381 */
1382 while (gfe_tx_enqueue(sc, txprio))
1383 continue;
1384
1385 GE_FUNC_EXIT(sc, "");
1386 return 0;
1387 }
1388
1389 void
1390 gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush)
1391 {
1392 struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
1393
1394 GE_FUNC_ENTER(sc, "gfe_tx_cleanup");
1395 if (txq == NULL) {
1396 GE_FUNC_EXIT(sc, "");
1397 return;
1398 }
1399
1400 if (!flush) {
1401 GE_FUNC_EXIT(sc, "");
1402 return;
1403 }
1404
1405 if ((sc->sc_flags & GE_NOFREE) == 0) {
1406 gfe_dmamem_free(sc, &txq->txq_desc_mem);
1407 gfe_dmamem_free(sc, &txq->txq_buf_mem);
1408 }
1409 GE_FUNC_EXIT(sc, "-F");
1410 }
1411
1412 void
1413 gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
1414 {
1415 GE_FUNC_ENTER(sc, "gfe_tx_stop");
1416
1417 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);
1418
1419 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
1420 sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
1421 sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
1422 ETH_IR_TxEndLow |ETH_IR_TxBufferLow);
1423
1424 gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
1425 gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);
1426
1427 sc->sc_ec.ec_if.if_timer = 0;
1428 GE_FUNC_EXIT(sc, "");
1429 }
1430
1431 int
1433 gfe_intr(void *arg)
1434 {
1435 struct gfe_softc * const sc = arg;
1436 uint32_t cause;
1437 uint32_t intrmask = sc->sc_intrmask;
1438 int claim = 0;
1439 int cnt;
1440
1441 GE_FUNC_ENTER(sc, "gfe_intr");
1442
1443 for (cnt = 0; cnt < 4; cnt++) {
1444 if (sc->sc_intrmask != intrmask) {
1445 sc->sc_intrmask = intrmask;
1446 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1447 }
1448 cause = GE_READ(sc, EICR);
1449 cause &= sc->sc_intrmask;
1450 GE_DPRINTF(sc, (".%#x", cause));
1451 if (cause == 0)
1452 break;
1453
1454 claim = 1;
1455
1456 GE_WRITE(sc, EICR, ~cause);
1457 #ifndef GE_NORX
1458 if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
1459 intrmask = gfe_rx_process(sc, cause, intrmask);
1460 #endif
1461
1462 #ifndef GE_NOTX
1463 if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
1464 intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
1465 if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
1466 intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
1467 #endif
1468 if (cause & ETH_IR_MIIPhySTC) {
1469 sc->sc_flags |= GE_PHYSTSCHG;
1470 /* intrmask &= ~ETH_IR_MIIPhySTC; */
1471 }
1472 }
1473
1474 while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
1475 continue;
1476 while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
1477 continue;
1478
1479 GE_FUNC_EXIT(sc, "");
1480 return claim;
1481 }
1482
1483 int
1485 gfe_mii_read (struct device *self, int phy, int reg)
1486 {
1487 return gt_mii_read(self, device_parent(self), phy, reg);
1488 }
1489
1490 void
1491 gfe_mii_write (struct device *self, int phy, int reg, int value)
1492 {
1493 gt_mii_write(self, device_parent(self), phy, reg, value);
1494 }
1495
1496 void
1497 gfe_mii_statchg (struct device *self)
1498 {
1499 /* struct gfe_softc *sc = device_private(self); */
1500 /* do nothing? */
1501 }
1502
1503 int
1505 gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
1506 {
1507 int error = 0;
1508 GE_FUNC_ENTER(sc, "gfe_whack");
1509
1510 switch (op) {
1511 case GE_WHACK_RESTART:
1512 #ifndef GE_NOTX
1513 gfe_tx_stop(sc, op);
1514 #endif
1515 /* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
1516 /* FALLTHROUGH */
1517 case GE_WHACK_START:
1518 #ifndef GE_NOHASH
1519 if (error == 0 && sc->sc_hashtable == NULL) {
1520 error = gfe_hash_alloc(sc);
1521 if (error)
1522 break;
1523 }
1524 if (op != GE_WHACK_RESTART)
1525 gfe_hash_fill(sc);
1526 #endif
1527 #ifndef GE_NORX
1528 if (op != GE_WHACK_RESTART) {
1529 error = gfe_rx_prime(sc);
1530 if (error)
1531 break;
1532 }
1533 #endif
1534 #ifndef GE_NOTX
1535 error = gfe_tx_start(sc, GE_TXPRIO_HI);
1536 if (error)
1537 break;
1538 #endif
1539 sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
1540 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
1541 GE_WRITE(sc, EPCXR, sc->sc_pcxr);
1542 GE_WRITE(sc, EICR, 0);
1543 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1544 #ifndef GE_NOHASH
1545 GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
1546 #endif
1547 #ifndef GE_NORX
1548 GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
1549 sc->sc_flags |= GE_RXACTIVE;
1550 #endif
1551 /* FALLTHROUGH */
1552 case GE_WHACK_CHANGE:
1553 GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
1554 GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
1555 GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
1556 GE_WRITE(sc, EIMR, sc->sc_intrmask);
1557 gfe_ifstart(&sc->sc_ec.ec_if);
1558 GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
1559 GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
1560 GE_FUNC_EXIT(sc, "");
1561 return error;
1562 case GE_WHACK_STOP:
1563 break;
1564 }
1565
1566 #ifdef GE_DEBUG
1567 if (error)
1568 GE_DPRINTF(sc, (" failed: %d\n", error));
1569 #endif
1570 GE_WRITE(sc, EPCR, sc->sc_pcr);
1571 GE_WRITE(sc, EIMR, 0);
1572 sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
1573 #ifndef GE_NOTX
1574 gfe_tx_stop(sc, GE_WHACK_STOP);
1575 #endif
1576 #ifndef GE_NORX
1577 gfe_rx_stop(sc, GE_WHACK_STOP);
1578 #endif
1579 #ifndef GE_NOHASH
1580 if ((sc->sc_flags & GE_NOFREE) == 0) {
1581 gfe_dmamem_free(sc, &sc->sc_hash_mem);
1582 sc->sc_hashtable = NULL;
1583 }
1584 #endif
1585
1586 GE_FUNC_EXIT(sc, "");
1587 return error;
1588 }
1589
1590 int
1592 gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
1593 {
1594 uint32_t w0, add0, add1;
1595 uint32_t result;
1596
1597 GE_FUNC_ENTER(sc, "gfe_hash_compute");
1598 add0 = ((uint32_t) eaddr[5] << 0) |
1599 ((uint32_t) eaddr[4] << 8) |
1600 ((uint32_t) eaddr[3] << 16);
1601
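/*
 * The three swap steps (nibbles, bit pairs, single bits) reverse the
 * bit order within each of the three address bytes; the same is done
 * for the upper three bytes below.
 */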
1602 add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
1603 add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
1604 add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);
1605
1606 add1 = ((uint32_t) eaddr[2] << 0) |
1607 ((uint32_t) eaddr[1] << 8) |
1608 ((uint32_t) eaddr[0] << 16);
1609
1610 add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
1611 add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
1612 add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);
1613
1614 GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
1615 /*
1616 * hashResult is the 15-bit hash entry address.
1617 * ethernetADD is a 48 bit number, which is derived from the Ethernet
1618 * MAC address, by nibble swapping in every byte (i.e. a MAC address
1619 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
1620 */
1621
1622 if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
1623 /*
1624 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
1625 *
1626 * hashFunc0 calculates the hashResult in the following manner:
1627 * hashResult[ 8:0] = ethernetADD[14:8,1,0]
1628 * XOR ethernetADD[23:15] XOR ethernetADD[32:24]
1629 */
1630 result = (add0 & 3) | ((add0 >> 6) & ~3);
1631 result ^= (add0 >> 15) ^ (add1 >> 0);
1632 result &= 0x1ff;
1633 /*
1634 * hashResult[14:9] = ethernetADD[7:2]
1635 */
1636 result |= (add0 & ~3) << 7; /* excess bits will be masked */
1637 GE_DPRINTF(sc, ("0(%#x)", result & 0x7fff));
1638 } else {
1639 #define TRIBITFLIP 073516240 /* yes, it's in octal */
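/*
 * Each octal digit i of TRIBITFLIP is the bit-reversal of the 3-bit
 * value i, so ((TRIBITFLIP >> (x * 3)) & 7) reverses the bits of x.
 */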
1640 /*
1641 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
1642 *
1643 * hashFunc1 calculates the hashResult in the following manner:
1644 * hashResult[08:00] = ethernetADD[06:14]
1645 * XOR ethernetADD[15:23] XOR ethernetADD[24:32]
1646 */
1647 w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
1648 /*
1649 * Now bitswap those 9 bits
1650 */
1651 result = 0;
1652 result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
1653 result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
1654 result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;
1655
1656 /*
1657 * hashResult[14:09] = ethernetADD[00:05]
1658 */
1659 result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
1660 result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
1661 GE_DPRINTF(sc, ("1(%#x)", result));
1662 }
1663 GE_FUNC_EXIT(sc, "");
1664 return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
1665 }
1666
1667 int
1668 gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op,
1669 enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN])
1670 {
1671 uint64_t he;
1672 uint64_t *maybe_he_p = NULL;
1673 int limit;
1674 int hash;
1675 int maybe_hash = 0;
1676
1677 GE_FUNC_ENTER(sc, "gfe_hash_entry_op");
1678
1679 hash = gfe_hash_compute(sc, eaddr);
1680
1681 if (sc->sc_hashtable == NULL) {
1682 panic("%s:%d: hashtable == NULL!", device_xname(&sc->sc_dev),
1683 __LINE__);
1684 }
1685
1686 /*
1687 * Assume we are going to insert so create the hash entry we
1688 * are going to insert. We also use it to match entries we
1689 * will be removing.
1690 */
1691 he = ((uint64_t) eaddr[5] << 43) |
1692 ((uint64_t) eaddr[4] << 35) |
1693 ((uint64_t) eaddr[3] << 27) |
1694 ((uint64_t) eaddr[2] << 19) |
1695 ((uint64_t) eaddr[1] << 11) |
1696 ((uint64_t) eaddr[0] << 3) |
1697 HSH_PRIO_INS(prio) | HSH_V | HSH_R;
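/*
 * (The GT hash entry format: the low bits are flags -- HSH_V marks the
 * entry valid, HSH_S soft-deletes it, and HSH_R apparently tells the GT
 * to receive rather than discard matching frames -- with the MAC address
 * packed above them and the receive priority from HSH_PRIO_INS().)
 */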
1698
1699 /*
1700 * The GT will search up to 12 entries for a hit, so we must mimic that.
1701 */
1702 hash &= sc->sc_hashmask / sizeof(he);
1703 for (limit = HSH_LIMIT; limit > 0 ; --limit) {
1704 /*
1705 * Does the GT wrap at the end, stop at the end, or overrun
1706 * the end? Assume it wraps for now. Stash a copy of the
1707 * current hash entry.
1708 */
1709 uint64_t *he_p = &sc->sc_hashtable[hash];
1710 uint64_t thishe = *he_p;
1711
1712 /*
1713 * If the hash entry isn't valid, that breaks the chain, and
1714 * this entry is a good candidate for reuse.
1715 */
1716 if ((thishe & HSH_V) == 0) {
1717 maybe_he_p = he_p;
1718 break;
1719 }
1720
1721 /*
1722 * If the hash entry has the same address we are looking for
1723 * then: if we are removing and the skip bit is set, it has
1724 * already been removed; if we are adding and the skip bit is
1725 * clear, it has already been added. In either case return EBUSY,
1726 * indicating the op has already been done. Otherwise flip
1727 * the skip bit and return 0.
1728 */
1729 if (((he ^ thishe) & HSH_ADDR_MASK) == 0) {
1730 if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) ||
1731 ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0))
1732 return EBUSY;
1733 *he_p = thishe ^ HSH_S;
1734 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1735 hash * sizeof(he), sizeof(he),
1736 BUS_DMASYNC_PREWRITE);
1737 GE_FUNC_EXIT(sc, "^");
1738 return 0;
1739 }
1740
1741 /*
1742 * If we haven't found a slot for the entry and this entry
1743 * is currently being skipped, return this entry.
1744 */
1745 if (maybe_he_p == NULL && (thishe & HSH_S)) {
1746 maybe_he_p = he_p;
1747 maybe_hash = hash;
1748 }
1749
1750 hash = (hash + 1) & (sc->sc_hashmask / sizeof(he));
1751 }
1752
1753 /*
1754 * If we got here, then there was no entry to remove.
1755 */
1756 if (op == GE_HASH_REMOVE) {
1757 GE_FUNC_EXIT(sc, "?");
1758 return ENOENT;
1759 }
1760
1761 /*
1762 * If we couldn't find a slot, return an error.
1763 */
1764 if (maybe_he_p == NULL) {
1765 GE_FUNC_EXIT(sc, "!");
1766 return ENOSPC;
1767 }
1768
1769 /* Update the entry. */
1770
1771 *maybe_he_p = he;
1772 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1773 maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE);
1774 GE_FUNC_EXIT(sc, "+");
1775 return 0;
1776 }
1777
1778 int
1779 gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd)
1780 {
1781 struct gfe_softc * const sc = ec->ec_if.if_softc;
1782 int error;
1783 enum gfe_hash_op op;
1784 enum gfe_rxprio prio;
1785
1786 GE_FUNC_ENTER(sc, "hash_multichg");
1787 /*
1788 * Is this a wildcard entry? If so and it's being removed, recompute.
1789 */
1790 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
1791 if (cmd == SIOCDELMULTI) {
1792 GE_FUNC_EXIT(sc, "");
1793 return ENETRESET;
1794 }
1795
1796 /*
1797 * A wildcard range can't go into the hash table; fall back to promiscuous mode.
1798 */
1799 sc->sc_flags |= GE_ALLMULTI;
1800 if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
1801 sc->sc_pcr |= ETH_EPCR_PM;
1802 GE_WRITE(sc, EPCR, sc->sc_pcr);
1803 GE_FUNC_EXIT(sc, "");
1804 return 0;
1805 }
1806 GE_FUNC_EXIT(sc, "");
1807 return ENETRESET;
1808 }
1809
1810 prio = GE_RXPRIO_MEDLO;
1811 op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);
1812
1813 if (sc->sc_hashtable == NULL) {
1814 GE_FUNC_EXIT(sc, "");
1815 return 0;
1816 }
1817
1818 error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
1819 if (error == EBUSY) {
1820 printf("%s: multichg: tried to %s %s again\n",
1821 device_xname(&sc->sc_dev),
1822 cmd == SIOCDELMULTI ? "remove" : "add",
1823 ether_sprintf(enm->enm_addrlo));
1824 GE_FUNC_EXIT(sc, "");
1825 return 0;
1826 }
1827
1828 if (error == ENOENT) {
1829 printf("%s: multichg: failed to remove %s: not in table\n",
1830 device_xname(&sc->sc_dev),
1831 ether_sprintf(enm->enm_addrlo));
1832 GE_FUNC_EXIT(sc, "");
1833 return 0;
1834 }
1835
1836 if (error == ENOSPC) {
1837 printf("%s: multichg: failed to add %s: no space; regenerating table\n",
1838 device_xname(&sc->sc_dev),
1839 ether_sprintf(enm->enm_addrlo));
1840 GE_FUNC_EXIT(sc, "");
1841 return ENETRESET;
1842 }
1843 GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
1844 device_xname(&sc->sc_dev),
1845 cmd == SIOCDELMULTI ? "remove" : "add",
1846 ether_sprintf(enm->enm_addrlo)));
1847 GE_FUNC_EXIT(sc, "");
1848 return 0;
1849 }
1850
1851 int
1852 gfe_hash_fill(struct gfe_softc *sc)
1853 {
1854 struct ether_multistep step;
1855 struct ether_multi *enm;
1856 int error;
1857
1858 GE_FUNC_ENTER(sc, "gfe_hash_fill");
1859
1860 error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI,
1861 CLLADDR(sc->sc_ec.ec_if.if_sadl));
1862 if (error) {
1863 GE_FUNC_EXIT(sc, "!");
1864 return error;
1865 }
1866 sc->sc_flags &= ~GE_ALLMULTI;
1867 if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
1868 sc->sc_pcr &= ~ETH_EPCR_PM;
1869 ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
1870 while (enm != NULL) {
1871 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1872 sc->sc_flags |= GE_ALLMULTI;
1873 sc->sc_pcr |= ETH_EPCR_PM;
1874 } else {
1875 error = gfe_hash_entry_op(sc, GE_HASH_ADD,
1876 GE_RXPRIO_MEDLO, enm->enm_addrlo);
1877 if (error == ENOSPC)
1878 break;
1879 }
1880 ETHER_NEXT_MULTI(step, enm);
1881 }
1882
1883 GE_FUNC_EXIT(sc, "");
1884 return error;
1885 }
1886
1887 int
1888 gfe_hash_alloc(struct gfe_softc *sc)
1889 {
1890 int error;
1891 GE_FUNC_ENTER(sc, "gfe_hash_alloc");
1892 sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1;
1893 error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1,
1894 BUS_DMA_NOCACHE);
1895 if (error) {
1896 printf("%s: failed to allocate %d bytes for hash table: %d\n",
1897 device_xname(&sc->sc_dev), sc->sc_hashmask + 1, error);
1898 GE_FUNC_EXIT(sc, "");
1899 return error;
1900 }
1901 sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva;
1902 memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1);
1903 bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map,
1904 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE);
1905 GE_FUNC_EXIT(sc, "");
1906 return 0;
1907 }
1908