/*	$NetBSD: elinkxl.c,v 1.34 2000/05/29 17:37:13 jhawk Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif
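
/*
 * Note on register access: the 3c90x register space is windowed.
 * GO_WINDOW(n) selects register bank n, and the routines below
 * generally leave window 1, the normal operating window, selected
 * when they finish.
 */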

/* ifmedia callbacks */
int ex_media_chg __P((struct ifnet *ifp));
void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));

void ex_probe_media __P((struct ex_softc *));
void ex_set_filter __P((struct ex_softc *));
void ex_set_media __P((struct ex_softc *));
struct mbuf *ex_get __P((struct ex_softc *, int));
u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
void ex_init __P((struct ex_softc *));
void ex_read __P((struct ex_softc *));
void ex_reset __P((struct ex_softc *));
void ex_set_mc __P((struct ex_softc *));
void ex_getstats __P((struct ex_softc *));
void ex_printstats __P((struct ex_softc *));
void ex_tick __P((void *));

static int ex_eeprom_busy __P((struct ex_softc *));
static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
static void ex_init_txdescs __P((struct ex_softc *));

static void ex_shutdown __P((void *));
static void ex_start __P((struct ifnet *));
static void ex_txstat __P((struct ex_softc *));

int ex_mii_readreg __P((struct device *, int, int));
void ex_mii_writereg __P((struct device *, int, int, int));
void ex_mii_statchg __P((struct device *));

void ex_probemedia __P((struct ex_softc *));

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};
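
/*
 * ex_probemedia() below walks this table against the media-present
 * bits in the window 3 reset options register; the all-zero entry
 * (exm_name == NULL) terminates the scan.
 */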

/*
 * MII bit-bang glue.
 */
u_int32_t ex_mii_bitbang_read __P((struct device *));
void ex_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
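
/*
 * mii_bitbang_readreg()/mii_bitbang_writereg() use the two accessors
 * above to clock MII management frames bit by bit through the PHY
 * management register in window 4, using the DATA/CLK/DIR bit
 * assignments given in the table.
 */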

/*
 * Back-end attach and configure.
 */
void
ex_config(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp;
	u_int16_t val;
	u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	callout_init(&sc->ex_mii_callout);

	ex_reset(sc);

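	/*
	 * The station address is stored in three 16-bit EEPROM words,
	 * most significant byte first, so each word supplies two bytes
	 * of the MAC address.
	 */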
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(macaddr));

	if (sc->intr_ack) {	/* 3C575BTX specific */
		GO_WINDOW(2);
		bus_space_write_2(sc->sc_iot, ioh, 12,
		    0x10 | bus_space_read_2(sc->sc_iot, ioh, 12));
	}

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		printf("%s: can't create upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}
	bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), 1,
	    EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		printf("%s: can't create download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			printf("%s: can't create tx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			printf("%s: can't create rx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once. The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			printf("%s: can't allocate or map rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}
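
	/*
	 * Each fragment above was sized MCLBYTES - 2 because
	 * ex_add_rxbuf() advances the mbuf data pointer by 2 bytes,
	 * keeping the payload after the 14-byte Ethernet header
	 * 32-bit aligned.
	 */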

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info. We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		u_int32_t icfg;

		GO_WINDOW(3);
		icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
		icfg &= ~(CONFIG_XCVR_SEL << 16);
		if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
			icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
		if (val & ELINK_MEDIACAP_100BASETX)
			icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
		if (val & ELINK_MEDIACAP_100BASEFX)
			icfg |= ELINKMEDIA_100BASE_FX
			    << (CONFIG_XCVR_SEL_SHIFT + 16);
		bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);

		mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/* Establish callback to reset card when we reboot. */
	sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */

	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		/*
		 * FALLTHROUGH: the comment above promises a full
		 * reverse-order unwind; a break here would leak the
		 * upload descriptor resources from stages 1-4.
		 */

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define	PRINT(s)	printf("%s%s", sep, s); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Bring device up.
 */
void
ex_init(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s, i;

	s = splnet();

	ex_waitcmd(sc);
	ex_stop(sc);

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	GO_WINDOW(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);

	GO_WINDOW(1);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

#define	ex_mchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) & 0xff)
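
/*
 * The 90xB hash filter has 256 bits; ex_mchash() picks one by taking
 * the big-endian CRC32 of the station address and keeping the low 8
 * bits.  ex_set_mc() below then sets that bit in the chip's filter,
 * roughly:
 *
 *	i = ex_mchash(enm->enm_addrlo);		(a value 0..255)
 *	ELINK_COMMAND <- ELINK_SETHASHFILBIT | i
 */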

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}

static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
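	/*
	 * Note on the underrun handling below: the transmit start
	 * threshold is raised in 20-byte steps, bounded by
	 * ETHER_MAX_LEN, but only while fewer than 100 packets in a
	 * row (tx_succ_ok) have gone out cleanly.
	 */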
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) &
	    TXS_COMPLETE) {
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			++sc->sc_ethercom.ec_if.if_collisions;
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			sc->tx_succ_ok = (sc->tx_succ_ok + 1) & 127;
	}
}

int
ex_media_chg(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		ex_init(sc);
	return 0;
}

void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int config0, config1;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		config0 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG);
		config1 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG + 2);

		config1 = config1 & ~CONFIG_MEDIAMASK;
		config1 |= (ELINKMEDIA_MII << CONFIG_MEDIAMASK_SHIFT);

		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2,
		    config1);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	config0 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	config1 = (u_int)bus_space_read_2(iot, ioh,
	    ELINK_W3_INTERNAL_CONFIG + 2);

	config1 = config1 & ~CONFIG_MEDIAMASK;
	config1 |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    CONFIG_MEDIAMASK_SHIFT);

	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}

/*
 * Start outputting on the interface.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

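	/*
	 * If a download is already in flight (tx_head != NULL) or no
	 * free DPDs remain, do nothing; ex_intr() calls ex_start()
	 * again after S_DN_COMPLETE has been handled.
	 */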
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
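		/*
		 * Step back to the last fragment written and mark it
		 * with EX_FR_LAST to terminate the scatter list.
		 */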
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}

int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

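	/*
	 * Loop until no masked interrupt source remains pending.  The
	 * interrupt latch is cleared at the top of each pass, so a
	 * source that re-asserts in the meantime is caught on the next
	 * iteration.
	 */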
	for (;;) {
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & S_HOST_ERROR) {
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free
			 * list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA
			 * engine stalled.  We could be more subtle about
			 * this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh,
			    ELINK_UPPKTSTATUS) & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}

int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

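	/*
	 * The statistics registers are small counters that clear when
	 * read; UPPER_FRAMES_OK holds extra high-order bits for the rx
	 * (bits 1:0) and tx (bits 5:4) frame counts, folded in below.
	 */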
	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is the number that occurred at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}

void
ex_printstats(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	int s;

	if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & S_COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(sc)
	struct ex_softc *sc;
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
	delay(400);
	ex_waitcmd(sc);
}

void
ex_watchdog(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(sc);
}

void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(sc)
	struct ex_softc *sc;
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}

int
ex_activate(self, act)
	struct device *self;
	enum devact act;
{
	struct ex_softc *sc = (void *) self;
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		if (sc->ex_conf & EX_CONF_MII)
			mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
			    MII_OFFSET_ANY);
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return (error);
}

int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static void
ex_shutdown(arg)
	void *arg;
{
	struct ex_softc *sc = arg;

	ex_stop(sc);
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
u_int16_t
ex_read_eeprom(sc, offset)
	struct ex_softc *sc;
	int offset;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t data = 0;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	switch (sc->ex_bustype) {
	case EX_BUS_PCI:
		bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    READ_EEPROM | (offset & 0x3f));
		break;
	case EX_BUS_CARDBUS:
		bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    0x230 + (offset & 0x3f));
		break;
	}
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Offset the data pointer by 2 bytes so the payload following
	 * the 14-byte Ethernet header is 32-bit aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

u_int32_t
ex_mii_bitbang_read(self)
	struct device *self;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(v, phy, reg)
	struct device *v;
	int phy, reg;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	struct ex_softc *sc = (struct ex_softc *)v;

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(v)
	struct device *v;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}