1	/*	$NetBSD: gem.c,v 1.41 2005/10/28 00:01:53 christos Exp $	*/
2
3 /*
4 *
5 * Copyright (C) 2001 Eduardo Horvath.
6 * All rights reserved.
7 *
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 */
31
32 /*
33 * Driver for Sun GEM ethernet controllers.
34 */
35
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: gem.c,v 1.41 2005/10/28 00:01:53 christos Exp $");
38
39 #include "opt_inet.h"
40 #include "bpfilter.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/callout.h>
45 #include <sys/mbuf.h>
46 #include <sys/syslog.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/ioctl.h>
51 #include <sys/errno.h>
52 #include <sys/device.h>
53
54 #include <machine/endian.h>
55
56 #include <uvm/uvm_extern.h>
57
58 #include <net/if.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_ether.h>
62
63 #ifdef INET
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/tcp.h>
69 #include <netinet/udp.h>
70 #endif
71
72 #if NBPFILTER > 0
73 #include <net/bpf.h>
74 #endif
75
76 #include <machine/bus.h>
77 #include <machine/intr.h>
78
79 #include <dev/mii/mii.h>
80 #include <dev/mii/miivar.h>
81 #include <dev/mii/mii_bitbang.h>
82
83 #include <dev/ic/gemreg.h>
84 #include <dev/ic/gemvar.h>
85
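/*
 * Number of register polls in gem_bitwait(); with a 100us delay per
 * iteration this allows roughly one second for a bit to change.
 */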
86 #define TRIES 10000
87
88 static void gem_start(struct ifnet *);
89 static void gem_stop(struct ifnet *, int);
90 int gem_ioctl(struct ifnet *, u_long, caddr_t);
91 void gem_tick(void *);
92 void gem_watchdog(struct ifnet *);
93 void gem_shutdown(void *);
94 int gem_init(struct ifnet *);
95 void gem_init_regs(struct gem_softc *sc);
96 static int gem_ringsize(int sz);
97 static int gem_meminit(struct gem_softc *);
98 void gem_mifinit(struct gem_softc *);
99 static int gem_bitwait(struct gem_softc *sc, int, u_int32_t, u_int32_t);
100 void gem_reset(struct gem_softc *);
101 int gem_reset_rx(struct gem_softc *sc);
102 int gem_reset_tx(struct gem_softc *sc);
103 int gem_disable_rx(struct gem_softc *sc);
104 int gem_disable_tx(struct gem_softc *sc);
105 static void gem_rxdrain(struct gem_softc *sc);
106 int gem_add_rxbuf(struct gem_softc *sc, int idx);
107 void gem_setladrf(struct gem_softc *);
108
109 /* MII methods & callbacks */
110 static int gem_mii_readreg(struct device *, int, int);
111 static void gem_mii_writereg(struct device *, int, int, int);
112 static void gem_mii_statchg(struct device *);
113
114 int gem_mediachange(struct ifnet *);
115 void gem_mediastatus(struct ifnet *, struct ifmediareq *);
116
117 struct mbuf *gem_get(struct gem_softc *, int, int);
118 int gem_put(struct gem_softc *, int, struct mbuf *);
119 void gem_read(struct gem_softc *, int, int);
120 int gem_eint(struct gem_softc *, u_int);
121 int gem_rint(struct gem_softc *);
122 int gem_tint(struct gem_softc *);
123 void gem_power(int, void *);
124
125 #ifdef GEM_DEBUG
126 #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
127 printf x
128 #else
129 #define DPRINTF(sc, x) /* nothing */
130 #endif
131
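/*
 * Minimum transmit frame length, excluding the FCS.  Frames shorter than
 * this are padded out from the zero-filled "nullbuf" buffer that
 * gem_attach() maps for DMA.
 */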
132 #define ETHER_MIN_TX (ETHERMIN + sizeof(struct ether_header))
133
134
135 /*
136 * gem_attach:
137 *
138 * Attach a Gem interface to the system.
139 */
140 void
141 gem_attach(sc, enaddr)
142 struct gem_softc *sc;
143 const uint8_t *enaddr;
144 {
145 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
146 struct mii_data *mii = &sc->sc_mii;
147 struct mii_softc *child;
148 struct ifmedia_entry *ifm;
149 int i, error;
150 u_int32_t v;
151 char *nullbuf;
152
153 /* Make sure the chip is stopped. */
154 ifp->if_softc = sc;
155 gem_stop(ifp, 0);
156 gem_reset(sc);
157
158 /*
159 * Allocate the control data structures, and create and load the
160	 * DMA map for it. gem_control_data is 9216 bytes; we have space for
161 * the padding buffer in the bus_dmamem_alloc()'d memory.
162 */
163 if ((error = bus_dmamem_alloc(sc->sc_dmatag,
164 sizeof(struct gem_control_data) + ETHER_MIN_TX, PAGE_SIZE,
165 0, &sc->sc_cdseg, 1, &sc->sc_cdnseg, 0)) != 0) {
166 aprint_error(
167 "%s: unable to allocate control data, error = %d\n",
168 sc->sc_dev.dv_xname, error);
169 goto fail_0;
170 }
171
172 /* XXX should map this in with correct endianness */
173 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
174 sizeof(struct gem_control_data), (caddr_t *)&sc->sc_control_data,
175 BUS_DMA_COHERENT)) != 0) {
176 aprint_error("%s: unable to map control data, error = %d\n",
177 sc->sc_dev.dv_xname, error);
178 goto fail_1;
179 }
180
181 nullbuf =
182 (caddr_t)sc->sc_control_data + sizeof(struct gem_control_data);
183
184 if ((error = bus_dmamap_create(sc->sc_dmatag,
185 sizeof(struct gem_control_data), 1,
186 sizeof(struct gem_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
187 aprint_error("%s: unable to create control data DMA map, "
188 "error = %d\n", sc->sc_dev.dv_xname, error);
189 goto fail_2;
190 }
191
192 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
193 sc->sc_control_data, sizeof(struct gem_control_data), NULL,
194 0)) != 0) {
195 aprint_error(
196 "%s: unable to load control data DMA map, error = %d\n",
197 sc->sc_dev.dv_xname, error);
198 goto fail_3;
199 }
200
201 memset(nullbuf, 0, ETHER_MIN_TX);
202 if ((error = bus_dmamap_create(sc->sc_dmatag,
203 ETHER_MIN_TX, 1, ETHER_MIN_TX, 0, 0, &sc->sc_nulldmamap)) != 0) {
204 aprint_error("%s: unable to create padding DMA map, "
205 "error = %d\n", sc->sc_dev.dv_xname, error);
206 goto fail_4;
207 }
208
209 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_nulldmamap,
210 nullbuf, ETHER_MIN_TX, NULL, 0)) != 0) {
211 aprint_error(
212 "%s: unable to load padding DMA map, error = %d\n",
213 sc->sc_dev.dv_xname, error);
214 goto fail_5;
215 }
216
217 bus_dmamap_sync(sc->sc_dmatag, sc->sc_nulldmamap, 0, ETHER_MIN_TX,
218 BUS_DMASYNC_PREWRITE);
219
220 /*
221 * Initialize the transmit job descriptors.
222 */
223 SIMPLEQ_INIT(&sc->sc_txfreeq);
224 SIMPLEQ_INIT(&sc->sc_txdirtyq);
225
226 /*
227 * Create the transmit buffer DMA maps.
228 */
229 for (i = 0; i < GEM_TXQUEUELEN; i++) {
230 struct gem_txsoft *txs;
231
232 txs = &sc->sc_txsoft[i];
233 txs->txs_mbuf = NULL;
234 if ((error = bus_dmamap_create(sc->sc_dmatag,
235 ETHER_MAX_LEN_JUMBO, GEM_NTXSEGS,
236 ETHER_MAX_LEN_JUMBO, 0, 0,
237 &txs->txs_dmamap)) != 0) {
238 aprint_error("%s: unable to create tx DMA map %d, "
239 "error = %d\n", sc->sc_dev.dv_xname, i, error);
240 goto fail_6;
241 }
242 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
243 }
244
245 /*
246 * Create the receive buffer DMA maps.
247 */
248 for (i = 0; i < GEM_NRXDESC; i++) {
249 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
250 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
251 aprint_error("%s: unable to create rx DMA map %d, "
252 "error = %d\n", sc->sc_dev.dv_xname, i, error);
253 goto fail_7;
254 }
255 sc->sc_rxsoft[i].rxs_mbuf = NULL;
256 }
257
258 /*
259 * From this point forward, the attachment cannot fail. A failure
260 * before this point releases all resources that may have been
261 * allocated.
262 */
263
264 /* Announce ourselves. */
265 aprint_normal("%s: Ethernet address %s", sc->sc_dev.dv_xname,
266 ether_sprintf(enaddr));
267
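	/*
	 * Both FIFO size registers report capacity in 64-byte units,
	 * hence the "* 64" and "/ 16" (i.e. 64/1024) conversions below.
	 */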
268 /* Get RX FIFO size */
269 sc->sc_rxfifosize = 64 *
270 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
271 aprint_normal(", %uKB RX fifo", sc->sc_rxfifosize / 1024);
272
273 /* Get TX FIFO size */
274 v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
275 aprint_normal(", %uKB TX fifo\n", v / 16);
276
277 /* Initialize ifnet structure. */
278 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
279 ifp->if_softc = sc;
280 ifp->if_flags =
281 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
282 sc->sc_if_flags = ifp->if_flags;
283 ifp->if_capabilities |=
284 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
285 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
286 ifp->if_start = gem_start;
287 ifp->if_ioctl = gem_ioctl;
288 ifp->if_watchdog = gem_watchdog;
289 ifp->if_stop = gem_stop;
290 ifp->if_init = gem_init;
291 IFQ_SET_READY(&ifp->if_snd);
292
293 /* Initialize ifmedia structures and MII info */
294 mii->mii_ifp = ifp;
295 mii->mii_readreg = gem_mii_readreg;
296 mii->mii_writereg = gem_mii_writereg;
297 mii->mii_statchg = gem_mii_statchg;
298
299 ifmedia_init(&mii->mii_media, IFM_IMASK, gem_mediachange, gem_mediastatus);
300
301 gem_mifinit(sc);
302
303 mii_attach(&sc->sc_dev, mii, 0xffffffff,
304 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
305
306 child = LIST_FIRST(&mii->mii_phys);
307 if (child == NULL) {
308 /* No PHY attached */
309 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
310 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
311 } else {
312 /*
313 * Walk along the list of attached MII devices and
314 * establish an `MII instance' to `phy number'
315 * mapping. We'll use this mapping in media change
316 * requests to determine which phy to use to program
317 * the MIF configuration register.
318 */
319 for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
320 /*
321 * Note: we support just two PHYs: the built-in
322 * internal device and an external on the MII
323 * connector.
324 */
325 if (child->mii_phy > 1 || child->mii_inst > 1) {
326 aprint_error(
327				    "%s: cannot accommodate MII device %s"
328 " at phy %d, instance %d\n",
329 sc->sc_dev.dv_xname,
330 child->mii_dev.dv_xname,
331 child->mii_phy, child->mii_inst);
332 continue;
333 }
334
335 sc->sc_phys[child->mii_inst] = child->mii_phy;
336 }
337
338 /*
339 * Now select and activate the PHY we will use.
340 *
341 * The order of preference is External (MDI1),
342 * Internal (MDI0), Serial Link (no MII).
343 */
344 if (sc->sc_phys[1]) {
345 #ifdef GEM_DEBUG
346 aprint_debug("using external phy\n");
347 #endif
348 sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
349 } else {
350 #ifdef GEM_DEBUG
351 aprint_debug("using internal phy\n");
352 #endif
353 sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
354 }
355 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
356 sc->sc_mif_config);
357
358 /*
359 * XXX - we can really do the following ONLY if the
360 * phy indeed has the auto negotiation capability!!
361 */
362 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
363 }
364
365 /*
366 * If we support GigE media, we support jumbo frames too.
367 * Unless we are Apple.
368 */
369 TAILQ_FOREACH(ifm, &sc->sc_media.ifm_list, ifm_list) {
370 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_T ||
371 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_SX ||
372 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_LX ||
373 IFM_SUBTYPE(ifm->ifm_media) == IFM_1000_CX) {
374 if (sc->sc_variant != GEM_APPLE_GMAC)
375 sc->sc_ethercom.ec_capabilities
376 |= ETHERCAP_JUMBO_MTU;
377
378 sc->sc_flags |= GEM_GIGABIT;
379 break;
380 }
381 }
382
383 /* claim 802.1q capability */
384 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
385
386 /* Attach the interface. */
387 if_attach(ifp);
388 ether_ifattach(ifp, enaddr);
389
390 sc->sc_sh = shutdownhook_establish(gem_shutdown, sc);
391 if (sc->sc_sh == NULL)
392 panic("gem_config: can't establish shutdownhook");
393
394 #if NRND > 0
395 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
396 RND_TYPE_NET, 0);
397 #endif
398
399 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
400 NULL, sc->sc_dev.dv_xname, "interrupts");
401 #ifdef GEM_COUNTERS
402 evcnt_attach_dynamic(&sc->sc_ev_txint, EVCNT_TYPE_INTR,
403 &sc->sc_ev_intr, sc->sc_dev.dv_xname, "tx interrupts");
404 evcnt_attach_dynamic(&sc->sc_ev_rxint, EVCNT_TYPE_INTR,
405 &sc->sc_ev_intr, sc->sc_dev.dv_xname, "rx interrupts");
406 evcnt_attach_dynamic(&sc->sc_ev_rxfull, EVCNT_TYPE_INTR,
407 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx ring full");
408 evcnt_attach_dynamic(&sc->sc_ev_rxnobuf, EVCNT_TYPE_INTR,
409 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx malloc failure");
410 evcnt_attach_dynamic(&sc->sc_ev_rxhist[0], EVCNT_TYPE_INTR,
411 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 0desc");
412 evcnt_attach_dynamic(&sc->sc_ev_rxhist[1], EVCNT_TYPE_INTR,
413 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 1desc");
414 evcnt_attach_dynamic(&sc->sc_ev_rxhist[2], EVCNT_TYPE_INTR,
415 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 2desc");
416 evcnt_attach_dynamic(&sc->sc_ev_rxhist[3], EVCNT_TYPE_INTR,
417 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx 3desc");
418 evcnt_attach_dynamic(&sc->sc_ev_rxhist[4], EVCNT_TYPE_INTR,
419 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >3desc");
420 evcnt_attach_dynamic(&sc->sc_ev_rxhist[5], EVCNT_TYPE_INTR,
421 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >7desc");
422 evcnt_attach_dynamic(&sc->sc_ev_rxhist[6], EVCNT_TYPE_INTR,
423 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >15desc");
424 evcnt_attach_dynamic(&sc->sc_ev_rxhist[7], EVCNT_TYPE_INTR,
425 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >31desc");
426 evcnt_attach_dynamic(&sc->sc_ev_rxhist[8], EVCNT_TYPE_INTR,
427 &sc->sc_ev_rxint, sc->sc_dev.dv_xname, "rx >63desc");
428 #endif
429
430 #if notyet
431 /*
432 * Add a suspend hook to make sure we come back up after a
433 * resume.
434 */
435 sc->sc_powerhook = powerhook_establish(gem_power, sc);
436 if (sc->sc_powerhook == NULL)
437 aprint_error("%s: WARNING: unable to establish power hook\n",
438 sc->sc_dev.dv_xname);
439 #endif
440
441 callout_init(&sc->sc_tick_ch);
442 return;
443
444 /*
445 * Free any resources we've allocated during the failed attach
446 * attempt. Do this in reverse order and fall through.
447 */
448 fail_7:
449 for (i = 0; i < GEM_NRXDESC; i++) {
450 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
451 bus_dmamap_destroy(sc->sc_dmatag,
452 sc->sc_rxsoft[i].rxs_dmamap);
453 }
454 fail_6:
455 for (i = 0; i < GEM_TXQUEUELEN; i++) {
456 if (sc->sc_txsoft[i].txs_dmamap != NULL)
457 bus_dmamap_destroy(sc->sc_dmatag,
458 sc->sc_txsoft[i].txs_dmamap);
459 }
460 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
461 fail_5:
462 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_nulldmamap);
463 fail_4:
464 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)nullbuf, ETHER_MIN_TX);
465 fail_3:
466 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
467 fail_2:
468 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sc_control_data,
469 sizeof(struct gem_control_data));
470 fail_1:
471 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
472 fail_0:
473 return;
474 }
475
476
477 void
478 gem_tick(arg)
479 void *arg;
480 {
481 struct gem_softc *sc = arg;
482 int s;
483
484 s = splnet();
485 mii_tick(&sc->sc_mii);
486 splx(s);
487
488 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
489
490 }
491
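/*
 * gem_bitwait:
 *
 *	Poll register `r' until the bits in `clr' all read back as zero
 *	and the bits in `set' all read back as one, or until TRIES polls
 *	have elapsed.  Returns non-zero on success, zero on timeout.
 */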
492 static int
493 gem_bitwait(sc, r, clr, set)
494 struct gem_softc *sc;
495 int r;
496 u_int32_t clr;
497 u_int32_t set;
498 {
499 int i;
500 u_int32_t reg;
501
502 for (i = TRIES; i--; DELAY(100)) {
503 reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
504		if ((reg & clr) == 0 && (reg & set) == set)
505 return (1);
506 }
507 return (0);
508 }
509
510 void
511 gem_reset(sc)
512 struct gem_softc *sc;
513 {
514 bus_space_tag_t t = sc->sc_bustag;
515 bus_space_handle_t h = sc->sc_h;
516 int s;
517
518 s = splnet();
519 DPRINTF(sc, ("%s: gem_reset\n", sc->sc_dev.dv_xname));
520 gem_reset_rx(sc);
521 gem_reset_tx(sc);
522
523 /* Do a full reset */
524 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX|GEM_RESET_TX);
525 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
526 printf("%s: cannot reset device\n", sc->sc_dev.dv_xname);
527 splx(s);
528 }
529
530
531 /*
532 * gem_rxdrain:
533 *
534 * Drain the receive queue.
535 */
536 static void
537 gem_rxdrain(struct gem_softc *sc)
538 {
539 struct gem_rxsoft *rxs;
540 int i;
541
542 for (i = 0; i < GEM_NRXDESC; i++) {
543 rxs = &sc->sc_rxsoft[i];
544 if (rxs->rxs_mbuf != NULL) {
545 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
546 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
547 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
548 m_freem(rxs->rxs_mbuf);
549 rxs->rxs_mbuf = NULL;
550 }
551 }
552 }
553
554 /*
555 * Reset the whole thing.
556 */
557 static void
558 gem_stop(struct ifnet *ifp, int disable)
559 {
560 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
561 struct gem_txsoft *txs;
562
563 DPRINTF(sc, ("%s: gem_stop\n", sc->sc_dev.dv_xname));
564
565 callout_stop(&sc->sc_tick_ch);
566 mii_down(&sc->sc_mii);
567
568 /* XXX - Should we reset these instead? */
569 gem_disable_rx(sc);
570 gem_disable_tx(sc);
571
572 /*
573 * Release any queued transmit buffers.
574 */
575 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
576 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
577 if (txs->txs_mbuf != NULL) {
578 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, 0,
579 txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
580 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
581 m_freem(txs->txs_mbuf);
582 txs->txs_mbuf = NULL;
583 }
584 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
585 }
586
587 if (disable) {
588 gem_rxdrain(sc);
589 }
590
591 /*
592 * Mark the interface down and cancel the watchdog timer.
593 */
594 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
595 sc->sc_if_flags = ifp->if_flags;
596 ifp->if_timer = 0;
597 }
598
599
600 /*
601 * Reset the receiver
602 */
603 int
604 gem_reset_rx(struct gem_softc *sc)
605 {
606 bus_space_tag_t t = sc->sc_bustag;
607 bus_space_handle_t h = sc->sc_h;
608
609 /*
610 * Resetting while DMA is in progress can cause a bus hang, so we
611 * disable DMA first.
612 */
613 gem_disable_rx(sc);
614 bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
615 /* Wait till it finishes */
616 if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
617		printf("%s: cannot disable RX DMA\n", sc->sc_dev.dv_xname);
618 /* Wait 5ms extra. */
619 delay(5000);
620
621 /* Finally, reset the ERX */
622 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
623 /* Wait till it finishes */
624	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
625 printf("%s: cannot reset receiver\n", sc->sc_dev.dv_xname);
626 return (1);
627 }
628 return (0);
629 }
630
631
632 /*
633 * Reset the transmitter
634 */
635 int
636 gem_reset_tx(struct gem_softc *sc)
637 {
638 bus_space_tag_t t = sc->sc_bustag;
639 bus_space_handle_t h = sc->sc_h;
640
641 /*
642 * Resetting while DMA is in progress can cause a bus hang, so we
643 * disable DMA first.
644 */
645 gem_disable_tx(sc);
646 bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
647 /* Wait till it finishes */
648 if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
649		printf("%s: cannot disable TX DMA\n", sc->sc_dev.dv_xname);
650 /* Wait 5ms extra. */
651 delay(5000);
652
653 /* Finally, reset the ETX */
654 bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
655 /* Wait till it finishes */
656 if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
657		printf("%s: cannot reset transmitter\n",
658 sc->sc_dev.dv_xname);
659 return (1);
660 }
661 return (0);
662 }
663
664 /*
665 * disable receiver.
666 */
667 int
668 gem_disable_rx(struct gem_softc *sc)
669 {
670 bus_space_tag_t t = sc->sc_bustag;
671 bus_space_handle_t h = sc->sc_h;
672 u_int32_t cfg;
673
674 /* Flip the enable bit */
675 cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
676 cfg &= ~GEM_MAC_RX_ENABLE;
677 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
678
679 /* Wait for it to finish */
680 return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
681 }
682
683 /*
684 * disable transmitter.
685 */
686 int
687 gem_disable_tx(struct gem_softc *sc)
688 {
689 bus_space_tag_t t = sc->sc_bustag;
690 bus_space_handle_t h = sc->sc_h;
691 u_int32_t cfg;
692
693 /* Flip the enable bit */
694 cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
695 cfg &= ~GEM_MAC_TX_ENABLE;
696 bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
697
698 /* Wait for it to finish */
699 return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
700 }
701
702 /*
703 * Initialize interface.
704 */
705 int
706 gem_meminit(struct gem_softc *sc)
707 {
708 struct gem_rxsoft *rxs;
709 int i, error;
710
711 /*
712 * Initialize the transmit descriptor ring.
713 */
714 memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
715 for (i = 0; i < GEM_NTXDESC; i++) {
716 sc->sc_txdescs[i].gd_flags = 0;
717 sc->sc_txdescs[i].gd_addr = 0;
718 }
719 GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
720 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
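	/*
	 * Note that one descriptor is held back (sc_txfree starts at
	 * GEM_NTXDESC - 1), presumably so that a completely full ring
	 * remains distinguishable from an empty one.
	 */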
721 sc->sc_txfree = GEM_NTXDESC-1;
722 sc->sc_txnext = 0;
723 sc->sc_txwin = 0;
724
725 /*
726 * Initialize the receive descriptor and receive job
727 * descriptor rings.
728 */
729 for (i = 0; i < GEM_NRXDESC; i++) {
730 rxs = &sc->sc_rxsoft[i];
731 if (rxs->rxs_mbuf == NULL) {
732 if ((error = gem_add_rxbuf(sc, i)) != 0) {
733 printf("%s: unable to allocate or map rx "
734 "buffer %d, error = %d\n",
735 sc->sc_dev.dv_xname, i, error);
736 /*
737 * XXX Should attempt to run with fewer receive
738 * XXX buffers instead of just failing.
739 */
740 gem_rxdrain(sc);
741 return (1);
742 }
743 } else
744 GEM_INIT_RXDESC(sc, i);
745 }
746 sc->sc_rxptr = 0;
747
748 return (0);
749 }
750
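/*
 * gem_ringsize:
 *
 *	Translate a descriptor count into the ring-size encoding used by
 *	the GEM_TX_CONFIG and GEM_RX_CONFIG registers.  Only powers of two
 *	between 32 and 8192 are representable.
 */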
751 static int
752 gem_ringsize(int sz)
753 {
754 switch (sz) {
755 case 32:
756 return GEM_RING_SZ_32;
757 case 64:
758 return GEM_RING_SZ_64;
759 case 128:
760 return GEM_RING_SZ_128;
761 case 256:
762 return GEM_RING_SZ_256;
763 case 512:
764 return GEM_RING_SZ_512;
765 case 1024:
766 return GEM_RING_SZ_1024;
767 case 2048:
768 return GEM_RING_SZ_2048;
769 case 4096:
770 return GEM_RING_SZ_4096;
771 case 8192:
772 return GEM_RING_SZ_8192;
773 default:
774 printf("gem: invalid Receive Descriptor ring size %d\n", sz);
775 return GEM_RING_SZ_32;
776 }
777 }
778
779 /*
780 * Initialization of interface; set up initialization block
781 * and transmit/receive descriptor rings.
782 */
783 int
784 gem_init(struct ifnet *ifp)
785 {
786 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
787 bus_space_tag_t t = sc->sc_bustag;
788 bus_space_handle_t h = sc->sc_h;
789 int s;
790 u_int max_frame_size;
791 u_int32_t v;
792
793 s = splnet();
794
795 DPRINTF(sc, ("%s: gem_init: calling stop\n", sc->sc_dev.dv_xname));
796 /*
797 * Initialization sequence. The numbered steps below correspond
798 * to the sequence outlined in section 6.3.5.1 in the Ethernet
799 * Channel Engine manual (part of the PCIO manual).
800 * See also the STP2002-STQ document from Sun Microsystems.
801 */
802
803 /* step 1 & 2. Reset the Ethernet Channel */
804 gem_stop(ifp, 0);
805 gem_reset(sc);
806 DPRINTF(sc, ("%s: gem_init: restarting\n", sc->sc_dev.dv_xname));
807
808 /* Re-initialize the MIF */
809 gem_mifinit(sc);
810
811 /* Call MI reset function if any */
812 if (sc->sc_hwreset)
813 (*sc->sc_hwreset)(sc);
814
815 /* step 3. Setup data structures in host memory */
816 gem_meminit(sc);
817
818 /* step 4. TX MAC registers & counters */
819 gem_init_regs(sc);
820 max_frame_size = max(sc->sc_ethercom.ec_if.if_mtu, ETHERMTU);
821 max_frame_size += ETHER_HDR_LEN + ETHER_CRC_LEN;
822 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)
823 max_frame_size += ETHER_VLAN_ENCAP_LEN;
824 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
825 max_frame_size|/* burst size */(0x2000<<16));
826
827 /* step 5. RX MAC registers & counters */
828 gem_setladrf(sc);
829
830 /* step 6 & 7. Program Descriptor Ring Base Addresses */
831 /* NOTE: we use only 32-bit DMA addresses here. */
832 bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
833 bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
834
835 bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
836 bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
837
838 /* step 8. Global Configuration & Interrupt Mask */
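	/*
	 * A bit set in GEM_INTMASK masks (disables) the corresponding
	 * interrupt source, so writing the complement enables exactly
	 * the sources listed below.
	 */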
839 bus_space_write_4(t, h, GEM_INTMASK,
840 ~(GEM_INTR_TX_INTME|
841 GEM_INTR_TX_EMPTY|
842 GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
843 GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
844 GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
845 GEM_INTR_BERR));
846 bus_space_write_4(t, h, GEM_MAC_RX_MASK,
847 GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
848 bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
849 bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */
850
851 /* step 9. ETX Configuration: use mostly default values */
852
853 /* Enable DMA */
854 v = gem_ringsize(GEM_NTXDESC /*XXX*/);
855 bus_space_write_4(t, h, GEM_TX_CONFIG,
856 v|GEM_TX_CONFIG_TXDMA_EN|
857 ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
858 bus_space_write_4(t, h, GEM_TX_KICK, sc->sc_txnext);
859
860 /* step 10. ERX Configuration */
861
862	/* Encode Receive Descriptor ring size */
863 v = gem_ringsize(GEM_NRXDESC /*XXX*/);
864
865 /* Set receive h/w checksum offset */
866 #ifdef INET
867 v |= (ETHER_HDR_LEN + sizeof(struct ip) +
868 ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
869 ETHER_VLAN_ENCAP_LEN : 0)) << GEM_RX_CONFIG_CXM_START_SHFT;
870 #endif
871
872 /* Enable DMA */
873 bus_space_write_4(t, h, GEM_RX_CONFIG,
874 v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
875 (2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
876
877 /*
878 * The following value is for an OFF Threshold of about 3/4 full
879 * and an ON Threshold of 1/4 full.
880 */
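	/*
	 * The threshold fields appear to count 64-byte FIFO units:
	 * 3 * sc_rxfifosize / 256 is 3/4 of (sc_rxfifosize / 64).
	 */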
881 bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
882 (3 * sc->sc_rxfifosize / 256) |
883 ( (sc->sc_rxfifosize / 256) << 12));
884 bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);
885
886 /* step 11. Configure Media */
887 mii_mediachg(&sc->sc_mii);
888
889 /* XXXX Serial link needs a whole different setup. */
890
891
892 /* step 12. RX_MAC Configuration Register */
893 v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
894 v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
895 bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
896
897 /* step 14. Issue Transmit Pending command */
898
899 /* Call MI initialization function if any */
900 if (sc->sc_hwinit)
901 (*sc->sc_hwinit)(sc);
902
903
904	/* step 15. Give the receiver a swift kick */
905 bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);
906
907 /* Start the one second timer. */
908 callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
909
910 ifp->if_flags |= IFF_RUNNING;
911 ifp->if_flags &= ~IFF_OACTIVE;
912 ifp->if_timer = 0;
913 sc->sc_if_flags = ifp->if_flags;
914 splx(s);
915
916 return (0);
917 }
918
919 void
920 gem_init_regs(struct gem_softc *sc)
921 {
922 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
923 bus_space_tag_t t = sc->sc_bustag;
924 bus_space_handle_t h = sc->sc_h;
925 const u_char *laddr = LLADDR(ifp->if_sadl);
926 u_int32_t v;
927
928 /* These regs are not cleared on reset */
929 if (!sc->sc_inited) {
930
931 /* Wooo. Magic values. */
932 bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
933 bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
934 bus_space_write_4(t, h, GEM_MAC_IPG2, 4);
935
936 bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
937 /* Max frame and max burst size */
938 bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
939 ETHER_MAX_LEN | (0x2000<<16));
940
941 bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
942 bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
943 bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
944 /* Dunno.... */
945 bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
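		/*
		 * Seed the (presumably backoff-related) random number
		 * generator from the low-order bits of the MAC address.
		 */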
946 bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
947 ((laddr[5]<<8)|laddr[4])&0x3ff);
948
949 /* Secondary MAC addr set to 0:0:0:0:0:0 */
950 bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
951 bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
952 bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);
953
954 /* MAC control addr set to 01:80:c2:00:00:01 */
955 bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
956 bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
957 bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);
958
959 /* MAC filter addr set to 0:0:0:0:0:0 */
960 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
961 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
962 bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);
963
964 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
965 bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);
966
967 sc->sc_inited = 1;
968 }
969
970 /* Counters need to be zeroed */
971 bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
972 bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
973 bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
974 bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
975 bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
976 bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
977 bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
978 bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
979 bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
980 bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
981 bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);
982
983 /* Un-pause stuff */
984 #if 0
985 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
986 #else
987 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
988 #endif
989
990 /*
991 * Set the station address.
992 */
993 bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
994 bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
995 bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
996
997 #if 0
998 if (sc->sc_variant != APPLE_GMAC)
999 return;
1000 #endif
1001
1002 /*
1003 * Enable MII outputs. Enable GMII if there is a gigabit PHY.
1004 */
1005 sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
1006 v = GEM_MAC_XIF_TX_MII_ENA;
1007 if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
1008 v |= GEM_MAC_XIF_FDPLX_LED;
1009 if (sc->sc_flags & GEM_GIGABIT)
1010 v |= GEM_MAC_XIF_GMII_MODE;
1011 }
1012 bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
1013 }
1014
1015 static void
1016 gem_start(ifp)
1017 struct ifnet *ifp;
1018 {
1019 struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1020 struct mbuf *m0, *m;
1021 struct gem_txsoft *txs, *last_txs;
1022 bus_dmamap_t dmamap;
1023 int error, firsttx, nexttx, lasttx = -1, ofree, seg;
1024 uint64_t flags = 0;
1025
1026 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1027 return;
1028
1029 /*
1030 * Remember the previous number of free descriptors and
1031 * the first descriptor we'll use.
1032 */
1033 ofree = sc->sc_txfree;
1034 firsttx = sc->sc_txnext;
1035
1036 DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n",
1037 sc->sc_dev.dv_xname, ofree, firsttx));
1038
1039 /*
1040 * Loop through the send queue, setting up transmit descriptors
1041 * until we drain the queue, or use up all available transmit
1042 * descriptors.
1043 */
1044 while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL &&
1045 sc->sc_txfree != 0) {
1046 /*
1047 * Grab a packet off the queue.
1048 */
1049 IFQ_POLL(&ifp->if_snd, m0);
1050 if (m0 == NULL)
1051 break;
1052 m = NULL;
1053
1054 dmamap = txs->txs_dmamap;
1055
1056 /*
1057 * Load the DMA map. If this fails, the packet either
1058		 * didn't fit in the allotted number of segments, or we were
1059 * short on resources. In this case, we'll copy and try
1060 * again.
1061 */
1062 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m0,
1063 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0 ||
1064 (m0->m_pkthdr.len < ETHER_MIN_TX &&
1065 dmamap->dm_nsegs == GEM_NTXSEGS)) {
1066 if (m0->m_pkthdr.len > MCLBYTES) {
1067 printf("%s: unable to allocate jumbo Tx "
1068 "cluster\n", sc->sc_dev.dv_xname);
1069 IFQ_DEQUEUE(&ifp->if_snd, m0);
1070 m_freem(m0);
1071 continue;
1072 }
1073 MGETHDR(m, M_DONTWAIT, MT_DATA);
1074 if (m == NULL) {
1075 printf("%s: unable to allocate Tx mbuf\n",
1076 sc->sc_dev.dv_xname);
1077 break;
1078 }
1079 MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
1080 if (m0->m_pkthdr.len > MHLEN) {
1081 MCLGET(m, M_DONTWAIT);
1082 if ((m->m_flags & M_EXT) == 0) {
1083 printf("%s: unable to allocate Tx "
1084 "cluster\n", sc->sc_dev.dv_xname);
1085 m_freem(m);
1086 break;
1087 }
1088 }
1089 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
1090 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1091 error = bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap,
1092 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1093 if (error) {
1094 printf("%s: unable to load Tx buffer, "
1095 "error = %d\n", sc->sc_dev.dv_xname, error);
1096 break;
1097 }
1098 }
1099
1100 /*
1101 * Ensure we have enough descriptors free to describe
1102 * the packet.
1103 */
1104 if (dmamap->dm_nsegs > ((m0->m_pkthdr.len < ETHER_MIN_TX) ?
1105 (sc->sc_txfree - 1) : sc->sc_txfree)) {
1106 /*
1107 * Not enough free descriptors to transmit this
1108 * packet. We haven't committed to anything yet,
1109 * so just unload the DMA map, put the packet
1110 * back on the queue, and punt. Notify the upper
1111 * layer that there are no more slots left.
1112 *
1113 * XXX We could allocate an mbuf and copy, but
1114			 * XXX is it worth it?
1115 */
1116 ifp->if_flags |= IFF_OACTIVE;
1117 sc->sc_if_flags = ifp->if_flags;
1118 bus_dmamap_unload(sc->sc_dmatag, dmamap);
1119 if (m != NULL)
1120 m_freem(m);
1121 break;
1122 }
1123
1124 IFQ_DEQUEUE(&ifp->if_snd, m0);
1125 if (m != NULL) {
1126 m_freem(m0);
1127 m0 = m;
1128 }
1129
1130 /*
1131 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1132 */
1133
1134 /* Sync the DMA map. */
1135 bus_dmamap_sync(sc->sc_dmatag, dmamap, 0, dmamap->dm_mapsize,
1136 BUS_DMASYNC_PREWRITE);
1137
1138 /*
1139 * Initialize the transmit descriptors.
1140 */
1141 for (nexttx = sc->sc_txnext, seg = 0;
1142 seg < dmamap->dm_nsegs;
1143 seg++, nexttx = GEM_NEXTTX(nexttx)) {
1144
1145 /*
1146 * If this is the first descriptor we're
1147 * enqueueing, set the start of packet flag,
1148 * and the checksum stuff if we want the hardware
1149 * to do it.
1150 */
1151 sc->sc_txdescs[nexttx].gd_addr =
1152 GEM_DMA_WRITE(sc, dmamap->dm_segs[seg].ds_addr);
1153 flags = dmamap->dm_segs[seg].ds_len & GEM_TD_BUFSIZE;
1154 if (nexttx == firsttx) {
1155 flags |= GEM_TD_START_OF_PACKET;
1156 if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
1157 sc->sc_txwin = 0;
1158 flags |= GEM_TD_INTERRUPT_ME;
1159 }
1160
1161 #ifdef INET
1162 /* h/w checksum */
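				/*
				 * GEM_TD_CXSUM_START is the offset at which
				 * the hardware starts summing (the transport
				 * header) and GEM_TD_CXSUM_STUFF is the
				 * offset where it stores the result (the
				 * TCP/UDP checksum field).
				 */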
1163 if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 |
1164 M_CSUM_UDPv4) && m0->m_pkthdr.csum_flags &
1165 (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1166 struct ether_header *eh;
1167 uint16_t offset, start;
1168
1169 eh = mtod(m0, struct ether_header *);
1170 switch (ntohs(eh->ether_type)) {
1171 case ETHERTYPE_IP:
1172 start = ETHER_HDR_LEN;
1173 break;
1174 case ETHERTYPE_VLAN:
1175 start = ETHER_HDR_LEN +
1176 ETHER_VLAN_ENCAP_LEN;
1177 break;
1178 default:
1179 /* unsupported, drop it */
1180 m_free(m0);
1181 continue;
1182 }
1183 start += M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1184 offset = M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data) + start;
1185 flags |= (start <<
1186 GEM_TD_CXSUM_STARTSHFT) |
1187 (offset <<
1188 GEM_TD_CXSUM_STUFFSHFT) |
1189 GEM_TD_CXSUM_ENABLE;
1190 }
1191 #endif
1192 }
1193 if (seg == dmamap->dm_nsegs - 1) {
1194 flags |= GEM_TD_END_OF_PACKET;
1195 } else {
1196 /* last flag set outside of loop */
1197 sc->sc_txdescs[nexttx].gd_flags =
1198 GEM_DMA_WRITE(sc, flags);
1199 }
1200 lasttx = nexttx;
1201 }
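		/*
		 * Frames shorter than ETHER_MIN_TX are padded out by
		 * chaining one more descriptor that points at the shared,
		 * zero-filled nullbuf mapped in gem_attach().
		 */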
1202 if (m0->m_pkthdr.len < ETHER_MIN_TX) {
1203 /* add padding buffer at end of chain */
1204 flags &= ~GEM_TD_END_OF_PACKET;
1205 sc->sc_txdescs[lasttx].gd_flags =
1206 GEM_DMA_WRITE(sc, flags);
1207
1208 sc->sc_txdescs[nexttx].gd_addr =
1209 GEM_DMA_WRITE(sc,
1210 sc->sc_nulldmamap->dm_segs[0].ds_addr);
1211 flags = ((ETHER_MIN_TX - m0->m_pkthdr.len) &
1212 GEM_TD_BUFSIZE) | GEM_TD_END_OF_PACKET;
1213 lasttx = nexttx;
1214 nexttx = GEM_NEXTTX(nexttx);
1215 seg++;
1216 }
1217 sc->sc_txdescs[lasttx].gd_flags = GEM_DMA_WRITE(sc, flags);
1218
1219 KASSERT(lasttx != -1);
1220
1221 /*
1222 * Store a pointer to the packet so we can free it later,
1223 * and remember what txdirty will be once the packet is
1224 * done.
1225 */
1226 txs->txs_mbuf = m0;
1227 txs->txs_firstdesc = sc->sc_txnext;
1228 txs->txs_lastdesc = lasttx;
1229 txs->txs_ndescs = seg;
1230
1231 #ifdef GEM_DEBUG
1232 if (ifp->if_flags & IFF_DEBUG) {
1233 printf(" gem_start %p transmit chain:\n", txs);
1234 for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) {
1235 printf("descriptor %d:\t", seg);
1236 printf("gd_flags: 0x%016llx\t", (long long)
1237 GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags));
1238 printf("gd_addr: 0x%016llx\n", (long long)
1239 GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr));
1240 if (seg == lasttx)
1241 break;
1242 }
1243 }
1244 #endif
1245
1246 /* Sync the descriptors we're using. */
1247 GEM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1248 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1249
1250 /* Advance the tx pointer. */
1251 sc->sc_txfree -= txs->txs_ndescs;
1252 sc->sc_txnext = nexttx;
1253
1254 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1255 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
1256
1257 last_txs = txs;
1258
1259 #if NBPFILTER > 0
1260 /*
1261 * Pass the packet to any BPF listeners.
1262 */
1263 if (ifp->if_bpf)
1264 bpf_mtap(ifp->if_bpf, m0);
1265 #endif /* NBPFILTER > 0 */
1266 }
1267
1268 if (txs == NULL || sc->sc_txfree == 0) {
1269 /* No more slots left; notify upper layer. */
1270 ifp->if_flags |= IFF_OACTIVE;
1271 sc->sc_if_flags = ifp->if_flags;
1272 }
1273
1274 if (sc->sc_txfree != ofree) {
1275 DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n",
1276 sc->sc_dev.dv_xname, lasttx, firsttx));
1277 /*
1278 * The entire packet chain is set up.
1279 * Kick the transmitter.
1280 */
1281 DPRINTF(sc, ("%s: gem_start: kicking tx %d\n",
1282 sc->sc_dev.dv_xname, nexttx));
1283 bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
1284 sc->sc_txnext);
1285
1286 /* Set a watchdog timer in case the chip flakes out. */
1287 ifp->if_timer = 5;
1288 DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
1289 sc->sc_dev.dv_xname, ifp->if_timer));
1290 }
1291 }
1292
1293 /*
1294 * Transmit interrupt.
1295 */
1296 int
1297 gem_tint(sc)
1298 struct gem_softc *sc;
1299 {
1300 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1301 bus_space_tag_t t = sc->sc_bustag;
1302 bus_space_handle_t mac = sc->sc_h;
1303 struct gem_txsoft *txs;
1304 int txlast;
1305 int progress = 0;
1306
1307
1308 DPRINTF(sc, ("%s: gem_tint\n", sc->sc_dev.dv_xname));
1309
1310 /*
1311 * Unload collision counters
1312 */
1313 ifp->if_collisions +=
1314 bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
1315 bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
1316 bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
1317 bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
1318
1319 /*
1320 * then clear the hardware counters.
1321 */
1322 bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
1323 bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
1324 bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
1325 bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
1326
1327 /*
1328 * Go through our Tx list and free mbufs for those
1329 * frames that have been transmitted.
1330 */
1331 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1332 GEM_CDTXSYNC(sc, txs->txs_lastdesc,
1333 txs->txs_ndescs,
1334 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1335
1336 #ifdef GEM_DEBUG
1337 if (ifp->if_flags & IFF_DEBUG) {
1338 int i;
1339 printf(" txsoft %p transmit chain:\n", txs);
1340 for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1341 printf("descriptor %d: ", i);
1342 printf("gd_flags: 0x%016llx\t", (long long)
1343 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1344 printf("gd_addr: 0x%016llx\n", (long long)
1345 GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1346 if (i == txs->txs_lastdesc)
1347 break;
1348 }
1349 }
1350 #endif
1351
1352 /*
1353		 * In theory, we could harvest some descriptors before
1354 * the ring is empty, but that's a bit complicated.
1355 *
1356 * GEM_TX_COMPLETION points to the last descriptor
1357 * processed +1.
1358 */
1359 txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
1360 DPRINTF(sc,
1361 ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n",
1362 txs->txs_lastdesc, txlast));
1363 if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1364 if ((txlast >= txs->txs_firstdesc) &&
1365 (txlast <= txs->txs_lastdesc))
1366 break;
1367 } else {
1368 /* Ick -- this command wraps */
1369 if ((txlast >= txs->txs_firstdesc) ||
1370 (txlast <= txs->txs_lastdesc))
1371 break;
1372 }
1373
1374 DPRINTF(sc, ("gem_tint: releasing a desc\n"));
1375 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1376
1377 sc->sc_txfree += txs->txs_ndescs;
1378
1379 if (txs->txs_mbuf == NULL) {
1380 #ifdef DIAGNOSTIC
1381			panic("gem_tint: null mbuf");
1382 #endif
1383 }
1384
1385 bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap,
1386 0, txs->txs_dmamap->dm_mapsize,
1387 BUS_DMASYNC_POSTWRITE);
1388 bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
1389 m_freem(txs->txs_mbuf);
1390 txs->txs_mbuf = NULL;
1391
1392 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1393
1394 ifp->if_opackets++;
1395 progress = 1;
1396 }
1397
1398 #if 0
1399 DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x "
1400 "GEM_TX_DATA_PTR %llx "
1401 "GEM_TX_COMPLETION %x\n",
1402 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
1403 ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
1404 GEM_TX_DATA_PTR_HI) << 32) |
1405 bus_space_read_4(sc->sc_bustag, sc->sc_h,
1406 GEM_TX_DATA_PTR_LO),
1407 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)));
1408 #endif
1409
1410 if (progress) {
1411 if (sc->sc_txfree == GEM_NTXDESC - 1)
1412 sc->sc_txwin = 0;
1413
1414 ifp->if_flags &= ~IFF_OACTIVE;
1415 sc->sc_if_flags = ifp->if_flags;
1416 gem_start(ifp);
1417
1418 if (SIMPLEQ_EMPTY(&sc->sc_txdirtyq))
1419 ifp->if_timer = 0;
1420 }
1421 DPRINTF(sc, ("%s: gem_tint: watchdog %d\n",
1422 sc->sc_dev.dv_xname, ifp->if_timer));
1423
1424 return (1);
1425 }
1426
1427 /*
1428 * Receive interrupt.
1429 */
1430 int
1431 gem_rint(sc)
1432 struct gem_softc *sc;
1433 {
1434 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1435 bus_space_tag_t t = sc->sc_bustag;
1436 bus_space_handle_t h = sc->sc_h;
1437 struct gem_rxsoft *rxs;
1438 struct mbuf *m;
1439 u_int64_t rxstat;
1440 u_int32_t rxcomp;
1441 int i, len, progress = 0;
1442
1443 DPRINTF(sc, ("%s: gem_rint\n", sc->sc_dev.dv_xname));
1444
1445 /*
1446 * Read the completion register once. This limits
1447 * how long the following loop can execute.
1448 */
1449 rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
1450
1451 /*
1452 * XXXX Read the lastrx only once at the top for speed.
1453 */
1454 DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n",
1455 sc->sc_rxptr, rxcomp));
1456
1457 /*
1458 * Go into the loop at least once.
1459 */
1460 for (i = sc->sc_rxptr; i == sc->sc_rxptr || i != rxcomp;
1461 i = GEM_NEXTRX(i)) {
1462 rxs = &sc->sc_rxsoft[i];
1463
1464 GEM_CDRXSYNC(sc, i,
1465 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1466
1467 rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1468
1469 if (rxstat & GEM_RD_OWN) {
1470 /*
1471 * We have processed all of the receive buffers.
1472 */
1473 break;
1474 }
1475
1476 progress++;
1477 ifp->if_ipackets++;
1478
1479 if (rxstat & GEM_RD_BAD_CRC) {
1480 ifp->if_ierrors++;
1481 printf("%s: receive error: CRC error\n",
1482 sc->sc_dev.dv_xname);
1483 GEM_INIT_RXDESC(sc, i);
1484 continue;
1485 }
1486
1487 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1488 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1489 #ifdef GEM_DEBUG
1490 if (ifp->if_flags & IFF_DEBUG) {
1491 printf(" rxsoft %p descriptor %d: ", rxs, i);
1492 printf("gd_flags: 0x%016llx\t", (long long)
1493 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1494 printf("gd_addr: 0x%016llx\n", (long long)
1495 GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1496 }
1497 #endif
1498
1499 /* No errors; receive the packet. */
1500 len = GEM_RD_BUFLEN(rxstat);
1501
1502 /*
1503 * Allocate a new mbuf cluster. If that fails, we are
1504 * out of memory, and must drop the packet and recycle
1505 * the buffer that's already attached to this descriptor.
1506 */
1507 m = rxs->rxs_mbuf;
1508 if (gem_add_rxbuf(sc, i) != 0) {
1509 GEM_COUNTER_INCR(sc, sc_ev_rxnobuf);
1510 ifp->if_ierrors++;
1511 GEM_INIT_RXDESC(sc, i);
1512 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1513 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1514 continue;
1515 }
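		/*
		 * gem_init() programmed a two-byte RX DMA offset (FBOFF)
		 * so that the IP header ends up 4-byte aligned; account
		 * for those two bytes here.
		 */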
1516 m->m_data += 2; /* We're already off by two */
1517
1518 m->m_pkthdr.rcvif = ifp;
1519 m->m_pkthdr.len = m->m_len = len;
1520
1521 #if NBPFILTER > 0
1522 /*
1523 * Pass this up to any BPF listeners, but only
1524		 * pass it up the stack if it's for us.
1525 */
1526 if (ifp->if_bpf)
1527 bpf_mtap(ifp->if_bpf, m);
1528	#endif /* NBPFILTER > 0 */
1529
1530 #ifdef INET
1531 /* hardware checksum */
1532 if (ifp->if_csum_flags_rx & (M_CSUM_UDPv4 | M_CSUM_TCPv4)) {
1533 struct ether_header *eh;
1534 struct ip *ip;
1535 struct udphdr *uh;
1536 int32_t hlen, pktlen;
1537
1538 if (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) {
1539 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN -
1540 ETHER_VLAN_ENCAP_LEN;
1541				eh = (struct ether_header *) (mtod(m, caddr_t) +
1542				    ETHER_VLAN_ENCAP_LEN);
1543 } else {
1544 pktlen = m->m_pkthdr.len - ETHER_HDR_LEN;
1545 eh = mtod(m, struct ether_header *);
1546 }
1547 if (ntohs(eh->ether_type) != ETHERTYPE_IP)
1548 goto swcsum;
1549 ip = (struct ip *) ((caddr_t)eh + ETHER_HDR_LEN);
1550
1551 /* IPv4 only */
1552 if (ip->ip_v != IPVERSION)
1553 goto swcsum;
1554
1555 hlen = ip->ip_hl << 2;
1556 if (hlen < sizeof(struct ip))
1557 goto swcsum;
1558
1559 /*
1560 * bail if too short, has random trailing garbage,
1561 * truncated, fragment, or has ethernet pad.
1562 */
1563 if ((ntohs(ip->ip_len) < hlen) ||
1564 (ntohs(ip->ip_len) != pktlen) ||
1565 (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)))
1566 goto swcsum;
1567
1568 switch (ip->ip_p) {
1569 case IPPROTO_TCP:
1570 if (! (ifp->if_csum_flags_rx & M_CSUM_TCPv4))
1571 goto swcsum;
1572 if (pktlen < (hlen + sizeof(struct tcphdr)))
1573 goto swcsum;
1574 m->m_pkthdr.csum_flags = M_CSUM_TCPv4;
1575 break;
1576 case IPPROTO_UDP:
1577 if (! (ifp->if_csum_flags_rx & M_CSUM_UDPv4))
1578 goto swcsum;
1579 if (pktlen < (hlen + sizeof(struct udphdr)))
1580 goto swcsum;
1581 uh = (struct udphdr *)((caddr_t)ip + hlen);
1582 /* no checksum */
1583 if (uh->uh_sum == 0)
1584 goto swcsum;
1585 m->m_pkthdr.csum_flags = M_CSUM_UDPv4;
1586 break;
1587 default:
1588 goto swcsum;
1589 }
1590
1591 /* the uncomplemented sum is expected */
1592 m->m_pkthdr.csum_data = (~rxstat) & GEM_RD_CHECKSUM;
1593
1594 /* if the pkt had ip options, we have to deduct them */
1595 if (hlen > sizeof(struct ip)) {
1596 uint16_t *opts;
1597 uint32_t optsum, temp;
1598
1599 optsum = 0;
1600 temp = hlen - sizeof(struct ip);
1601 opts = (uint16_t *) ((caddr_t) ip +
1602 sizeof(struct ip));
1603
1604 while (temp > 1) {
1605 optsum += ntohs(*opts++);
1606 temp -= 2;
1607 }
1608 while (optsum >> 16)
1609 optsum = (optsum >> 16) +
1610 (optsum & 0xffff);
1611
1612 /* Deduct ip opts sum from hwsum (rfc 1624). */
1613 m->m_pkthdr.csum_data =
1614 ~((~m->m_pkthdr.csum_data) - ~optsum);
1615
1616 while (m->m_pkthdr.csum_data >> 16)
1617 m->m_pkthdr.csum_data =
1618 (m->m_pkthdr.csum_data >> 16) +
1619 (m->m_pkthdr.csum_data &
1620 0xffff);
1621 }
1622
1623 m->m_pkthdr.csum_flags |= M_CSUM_DATA |
1624 M_CSUM_NO_PSEUDOHDR;
1625 } else
1626 swcsum:
1627 m->m_pkthdr.csum_flags = 0;
1628 #endif
1629 /* Pass it on. */
1630 (*ifp->if_input)(ifp, m);
1631 }
1632
1633 if (progress) {
1634 /* Update the receive pointer. */
1635 if (i == sc->sc_rxptr) {
1636 GEM_COUNTER_INCR(sc, sc_ev_rxfull);
1637 #ifdef GEM_DEBUG
1638 if (ifp->if_flags & IFF_DEBUG)
1639 printf("%s: rint: ring wrap\n",
1640 sc->sc_dev.dv_xname);
1641 #endif
1642 }
1643 sc->sc_rxptr = i;
1644 bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
1645 }
1646 #ifdef GEM_COUNTERS
1647 if (progress <= 4) {
1648 GEM_COUNTER_INCR(sc, sc_ev_rxhist[progress]);
1649 } else if (progress < 32) {
1650 if (progress < 16)
1651 GEM_COUNTER_INCR(sc, sc_ev_rxhist[5]);
1652 else
1653 GEM_COUNTER_INCR(sc, sc_ev_rxhist[6]);
1654
1655 } else {
1656 if (progress < 64)
1657 GEM_COUNTER_INCR(sc, sc_ev_rxhist[7]);
1658 else
1659 GEM_COUNTER_INCR(sc, sc_ev_rxhist[8]);
1660 }
1661 #endif
1662
1663 DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n",
1664 sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)));
1665
1666 return (1);
1667 }
1668
1669
1670 /*
1671 * gem_add_rxbuf:
1672 *
1673 * Add a receive buffer to the indicated descriptor.
1674 */
1675 int
1676 gem_add_rxbuf(struct gem_softc *sc, int idx)
1677 {
1678 struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1679 struct mbuf *m;
1680 int error;
1681
1682 MGETHDR(m, M_DONTWAIT, MT_DATA);
1683 if (m == NULL)
1684 return (ENOBUFS);
1685
1686 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
1687 MCLGET(m, M_DONTWAIT);
1688 if ((m->m_flags & M_EXT) == 0) {
1689 m_freem(m);
1690 return (ENOBUFS);
1691 }
1692
1693 #ifdef GEM_DEBUG
1694 /* bzero the packet to check DMA */
1695 memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1696 #endif
1697
1698 if (rxs->rxs_mbuf != NULL)
1699 bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
1700
1701 rxs->rxs_mbuf = m;
1702
1703 error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap,
1704 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1705 BUS_DMA_READ|BUS_DMA_NOWAIT);
1706 if (error) {
1707 printf("%s: can't load rx DMA map %d, error = %d\n",
1708 sc->sc_dev.dv_xname, idx, error);
1709 panic("gem_add_rxbuf"); /* XXX */
1710 }
1711
1712 bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
1713 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1714
1715 GEM_INIT_RXDESC(sc, idx);
1716
1717 return (0);
1718 }
1719
1720
1721 int
1722 gem_eint(sc, status)
1723 struct gem_softc *sc;
1724 u_int status;
1725 {
1726 char bits[128];
1727
1728 if ((status & GEM_INTR_MIF) != 0) {
1729 printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
1730 return (1);
1731 }
1732
1733 printf("%s: status=%s\n", sc->sc_dev.dv_xname,
1734 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits)));
1735 return (1);
1736 }
1737
1738
1739 int
1740 gem_intr(v)
1741 void *v;
1742 {
1743 struct gem_softc *sc = (struct gem_softc *)v;
1744 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1745 bus_space_tag_t t = sc->sc_bustag;
1746 bus_space_handle_t seb = sc->sc_h;
1747 u_int32_t status;
1748 int r = 0;
1749 #ifdef GEM_DEBUG
1750 char bits[128];
1751 #endif
1752
1753 sc->sc_ev_intr.ev_count++;
1754
1755 status = bus_space_read_4(t, seb, GEM_STATUS);
1756 DPRINTF(sc, ("%s: gem_intr: cplt 0x%x status %s\n",
1757 sc->sc_dev.dv_xname, (status >> 19),
1758 bitmask_snprintf(status, GEM_INTR_BITS, bits, sizeof(bits))));
1759
1760 if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1761 r |= gem_eint(sc, status);
1762
1763 if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) {
1764 GEM_COUNTER_INCR(sc, sc_ev_txint);
1765 r |= gem_tint(sc);
1766 }
1767
1768 if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) {
1769 GEM_COUNTER_INCR(sc, sc_ev_rxint);
1770 r |= gem_rint(sc);
1771 }
1772
1773 /* We should eventually do more than just print out error stats. */
1774 if (status & GEM_INTR_TX_MAC) {
1775 int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
1776 if (txstat & ~GEM_MAC_TX_XMIT_DONE)
1777 printf("%s: MAC tx fault, status %x\n",
1778 sc->sc_dev.dv_xname, txstat);
1779 if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
1780 gem_init(ifp);
1781 }
1782 if (status & GEM_INTR_RX_MAC) {
1783 int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
1784 if (rxstat & ~GEM_MAC_RX_DONE)
1785 printf("%s: MAC rx fault, status %x\n",
1786 sc->sc_dev.dv_xname, rxstat);
1787 /*
1788		 * On some chip revisions GEM_MAC_RX_OVERFLOW happens often
1789		 * due to a silicon bug, so handle it silently.
1790 */
1791 if (rxstat & GEM_MAC_RX_OVERFLOW)
1792 gem_init(ifp);
1793 else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1794 printf("%s: MAC rx fault, status %x\n",
1795 sc->sc_dev.dv_xname, rxstat);
1796 }
1797 return (r);
1798 }
1799
1800
1801 void
1802 gem_watchdog(ifp)
1803 struct ifnet *ifp;
1804 {
1805 struct gem_softc *sc = ifp->if_softc;
1806
1807 DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1808 "GEM_MAC_RX_CONFIG %x\n",
1809 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
1810 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
1811 bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)));
1812
1813 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1814 ++ifp->if_oerrors;
1815
1816 /* Try to get more packets going. */
1817 gem_start(ifp);
1818 }
1819
1820 /*
1821 * Initialize the MII Management Interface
1822 */
1823 void
1824 gem_mifinit(sc)
1825 struct gem_softc *sc;
1826 {
1827 bus_space_tag_t t = sc->sc_bustag;
1828 bus_space_handle_t mif = sc->sc_h;
1829
1830 /* Configure the MIF in frame mode */
1831 sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1832 sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1833 bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
1834 }
1835
1836 /*
1837 * MII interface
1838 *
1839 * The GEM MII interface supports at least three different operating modes:
1840 *
1841 * Bitbang mode is implemented using data, clock and output enable registers.
1842 *
1843 * Frame mode is implemented by loading a complete frame into the frame
1844 * register and polling the valid bit for completion.
1845 *
1846 * Polling mode uses the frame register but completion is indicated by
1847 * an interrupt.
1848 *
1849 */
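/*
 * This driver uses frame mode: gem_mifinit() clears the bit-bang enable,
 * and gem_mii_readreg()/gem_mii_writereg() below poll the frame register
 * for completion.
 */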
1850 static int
1851 gem_mii_readreg(self, phy, reg)
1852 struct device *self;
1853 int phy, reg;
1854 {
1855 struct gem_softc *sc = (void *)self;
1856 bus_space_tag_t t = sc->sc_bustag;
1857 bus_space_handle_t mif = sc->sc_h;
1858 int n;
1859 u_int32_t v;
1860
1861 #ifdef GEM_DEBUG1
1862 if (sc->sc_debug)
1863 printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1864 #endif
1865
1866 #if 0
1867 /* Select the desired PHY in the MIF configuration register */
1868 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1869 /* Clear PHY select bit */
1870 v &= ~GEM_MIF_CONFIG_PHY_SEL;
1871 if (phy == GEM_PHYAD_EXTERNAL)
1872 /* Set PHY select bit to get at external device */
1873 v |= GEM_MIF_CONFIG_PHY_SEL;
1874 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
1875 #endif
1876
1877 /* Construct the frame command */
1878 v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) |
1879 GEM_MIF_FRAME_READ;
1880
1881 bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1882 for (n = 0; n < 100; n++) {
1883 DELAY(1);
1884 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1885 if (v & GEM_MIF_FRAME_TA0)
1886 return (v & GEM_MIF_FRAME_DATA);
1887 }
1888
1889 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1890 return (0);
1891 }
1892
1893 static void
1894 gem_mii_writereg(self, phy, reg, val)
1895 struct device *self;
1896 int phy, reg, val;
1897 {
1898 struct gem_softc *sc = (void *)self;
1899 bus_space_tag_t t = sc->sc_bustag;
1900 bus_space_handle_t mif = sc->sc_h;
1901 int n;
1902 u_int32_t v;
1903
1904 #ifdef GEM_DEBUG1
1905 if (sc->sc_debug)
1906 printf("gem_mii_writereg: phy %d reg %d val %x\n",
1907 phy, reg, val);
1908 #endif
1909
1910 #if 0
1911 /* Select the desired PHY in the MIF configuration register */
1912 v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1913 /* Clear PHY select bit */
1914 v &= ~GEM_MIF_CONFIG_PHY_SEL;
1915 if (phy == GEM_PHYAD_EXTERNAL)
1916 /* Set PHY select bit to get at external device */
1917 v |= GEM_MIF_CONFIG_PHY_SEL;
1918 bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
1919 #endif
1920 /* Construct the frame command */
1921 v = GEM_MIF_FRAME_WRITE |
1922 (phy << GEM_MIF_PHY_SHIFT) |
1923 (reg << GEM_MIF_REG_SHIFT) |
1924 (val & GEM_MIF_FRAME_DATA);
1925
1926 bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1927 for (n = 0; n < 100; n++) {
1928 DELAY(1);
1929 v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1930 if (v & GEM_MIF_FRAME_TA0)
1931 return;
1932 }
1933
1934 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
1935 }

static void
gem_mii_statchg(dev)
	struct device *dev;
{
	struct gem_softc *sc = (void *)dev;
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	/* The register must be cleared and a delay allowed before changing. */
	delay(10000);
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
	    GEM_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
			/* half duplex -- disable echo */
			v |= GEM_MAC_XIF_ECHO_DISABL;

		if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}

int
gem_mediachange(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
gem_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct gem_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int gem_ioctldebug = 0;
/*
 * Process an ioctl request.
 */
int
gem_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct gem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		gem_setladrf(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFFLAGS:
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		/*
		 * If the interface is already up and running and only
		 * flags that do not require a reset have changed,
		 * reloading the address filter is sufficient; otherwise
		 * fall through to the default handling.
		 */
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			gem_setladrf(sc);
			break;
		}
#undef RESETIGN
		/*FALLTHROUGH*/
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING) {
				if (gem_ioctldebug) printf("reset1\n");
				gem_init(ifp);
				delay(50000);
			}
			error = 0;
		}
		break;
	}

	/* Try to get things going again */
	if (ifp->if_flags & IFF_UP) {
		if (gem_ioctldebug) printf("start\n");
		gem_start(ifp);
	}
	splx(s);
	return (error);
}


void
gem_shutdown(arg)
	void *arg;
{
	struct gem_softc *sc = (struct gem_softc *)arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	gem_stop(ifp, 1);
}

/*
 * Set up the logical address filter.
 */
void
gem_setladrf(sc)
	struct gem_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	u_int32_t crc;
	u_int32_t hash[16];
	u_int32_t v;
	int i;

	/* Get current RX configuration */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);

	/*
	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	 * enabled.
	 */
	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
	    GEM_MAC_RX_PROMISC_GRP);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode */
		v |= GEM_MAC_RX_PROMISCUOUS;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a CRC generator, and then using the high order 8 bits as an
	 * index into the 256 bit logical address filter.  The high order 4
	 * bits select the word, while the other 4 bits select the bit within
	 * the word (where bit 0 is the MSB).
	 */
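	/*
	 * Worked example (the CRC value is hypothetical, for illustration
	 * only): if ether_crc32_le() returned 0x4d32f8a1 for some address,
	 * crc >> 24 would be 0x4d, selecting word 0x4d >> 4 == 4 and bit
	 * 15 - (0x4d & 15) == 2, i.e. hash[4] |= 1 << 2.
	 */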

	/* Clear hash table */
	memset(hash, 0, sizeof(hash));

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 * XXX use the addr filter for this
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			v |= GEM_MAC_RX_PROMISC_GRP;
			goto chipit;
		}

		/* Get the LE CRC32 of the address */
		crc = ether_crc32_le(enm->enm_addrlo, sizeof(enm->enm_addrlo));

		/* Just want the 8 most significant bits. */
		crc >>= 24;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (15 - (crc & 15));

		ETHER_NEXT_MULTI(step, enm);
	}

	v |= GEM_MAC_RX_HASH_FILTER;
	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip (if we are using it) */
	for (i = 0; i < 16; i++) {
		bus_space_write_4(t, h,
		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
		    hash[i]);
	}

chipit:
	sc->sc_if_flags = ifp->if_flags;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
}

#if notyet

/*
 * gem_power:
 *
 *	Power management (suspend/resume) hook.
 */
void
gem_power(why, arg)
	int why;
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		gem_stop(ifp, 1);
		if (sc->sc_power != NULL)
			(*sc->sc_power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->sc_power != NULL)
				(*sc->sc_power)(sc, why);
			gem_init(ifp);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}
#endif
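/*
 * Note: if gem_power() is ever enabled, the attachment code would also
 * have to register it as a power hook, e.g. via powerhook_establish(9)
 * (sketch only; whether the softc gains a member to record the returned
 * cookie is left open):
 *
 *	(void)powerhook_establish(gem_power, sc);
 */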