/*	$NetBSD: elinkxl.c,v 1.32 2000/05/12 15:22:33 tsutsui Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42 #include "rnd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/kernel.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/ioctl.h>
51 #include <sys/errno.h>
52 #include <sys/syslog.h>
53 #include <sys/select.h>
54 #include <sys/device.h>
55 #if NRND > 0
56 #include <sys/rnd.h>
57 #endif
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_ether.h>
62 #include <net/if_media.h>
63
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #include <netinet/if_inarp.h>
70 #endif
71
72 #ifdef NS
73 #include <netns/ns.h>
74 #include <netns/ns_if.h>
75 #endif
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #include <net/bpfdesc.h>
80 #endif
81
82 #include <machine/cpu.h>
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85 #include <machine/endian.h>
86
87 #include <vm/vm.h>
88 #include <vm/pmap.h>
89
90 #include <dev/mii/miivar.h>
91 #include <dev/mii/mii.h>
92 #include <dev/mii/mii_bitbang.h>
93
94 #include <dev/ic/elink3reg.h>
95 /* #include <dev/ic/elink3var.h> */
96 #include <dev/ic/elinkxlreg.h>
97 #include <dev/ic/elinkxlvar.h>
98
#ifdef DEBUG
/* Driver debug flag; nonzero enables extra diagnostics (DEBUG kernels only). */
int exdebug = 0;
#endif
102
/* ifmedia callbacks */
int ex_media_chg __P((struct ifnet *ifp));
void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));

/* Driver-internal helpers. */
void ex_probe_media __P((struct ex_softc *));
void ex_set_filter __P((struct ex_softc *));
void ex_set_media __P((struct ex_softc *));
struct mbuf *ex_get __P((struct ex_softc *, int));
u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
void ex_init __P((struct ex_softc *));
void ex_read __P((struct ex_softc *));
void ex_reset __P((struct ex_softc *));
void ex_set_mc __P((struct ex_softc *));
void ex_getstats __P((struct ex_softc *));
void ex_printstats __P((struct ex_softc *));
void ex_tick __P((void *));

static int ex_eeprom_busy __P((struct ex_softc *));
static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
static void ex_init_txdescs __P((struct ex_softc *));

static void ex_shutdown __P((void *));
static void ex_start __P((struct ifnet *));
static void ex_txstat __P((struct ex_softc *));
static u_int16_t ex_mchash __P((u_char *));

/* MII (PHY) register access and link-change notification. */
int ex_mii_readreg __P((struct device *, int, int));
void ex_mii_writereg __P((struct device *, int, int, int));
void ex_mii_statchg __P((struct device *));

void ex_probemedia __P((struct ex_softc *));
134
/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};
145
/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.  The 10baseT entry appears twice so that the
 * full-duplex variant (same media-present bit) can be advertised.
 * Terminated by an all-zero sentinel entry (exm_name == NULL).
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};
172
/*
 * MII bit-bang glue.
 */
u_int32_t ex_mii_bitbang_read __P((struct device *));
void ex_mii_bitbang_write __P((struct device *, u_int32_t));

/*
 * Register-bit map handed to the generic MII bit-bang code.  Data in
 * and data out share the same pin (ELINK_PHY_DATA); direction is
 * controlled by ELINK_PHY_DIR (0 = PHY-to-host direction bit unused).
 */
const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
190
191 /*
192 * Back-end attach and configure.
193 */
194 void
195 ex_config(sc)
196 struct ex_softc *sc;
197 {
198 struct ifnet *ifp;
199 u_int16_t val;
200 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
201 bus_space_tag_t iot = sc->sc_iot;
202 bus_space_handle_t ioh = sc->sc_ioh;
203 int i, error, attach_stage;
204
205 callout_init(&sc->ex_mii_callout);
206
207 ex_reset(sc);
208
209 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
210 macaddr[0] = val >> 8;
211 macaddr[1] = val & 0xff;
212 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
213 macaddr[2] = val >> 8;
214 macaddr[3] = val & 0xff;
215 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
216 macaddr[4] = val >> 8;
217 macaddr[5] = val & 0xff;
218
219 printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
220 ether_sprintf(macaddr));
221
222 if (sc->intr_ack) { /* 3C575BTX specific */
223 GO_WINDOW(2);
224 bus_space_write_2(sc->sc_iot, ioh, 12, 0x10|bus_space_read_2(sc->sc_iot, ioh, 12));
225 }
226
227 attach_stage = 0;
228
229 /*
230 * Allocate the upload descriptors, and create and load the DMA
231 * map for them.
232 */
233 if ((error = bus_dmamem_alloc(sc->sc_dmat,
234 EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
235 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
236 printf("%s: can't allocate upload descriptors, error = %d\n",
237 sc->sc_dev.dv_xname, error);
238 goto fail;
239 }
240
241 attach_stage = 1;
242
243 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
244 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
245 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
246 printf("%s: can't map upload descriptors, error = %d\n",
247 sc->sc_dev.dv_xname, error);
248 goto fail;
249 }
250
251 attach_stage = 2;
252
253 if ((error = bus_dmamap_create(sc->sc_dmat,
254 EX_NUPD * sizeof (struct ex_upd), 1,
255 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
256 &sc->sc_upd_dmamap)) != 0) {
257 printf("%s: can't create upload desc. DMA map, error = %d\n",
258 sc->sc_dev.dv_xname, error);
259 goto fail;
260 }
261
262 attach_stage = 3;
263
264 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
265 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
266 BUS_DMA_NOWAIT)) != 0) {
267 printf("%s: can't load upload desc. DMA map, error = %d\n",
268 sc->sc_dev.dv_xname, error);
269 goto fail;
270 }
271
272 attach_stage = 4;
273
274 /*
275 * Allocate the download descriptors, and create and load the DMA
276 * map for them.
277 */
278 if ((error = bus_dmamem_alloc(sc->sc_dmat,
279 EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
280 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
281 printf("%s: can't allocate download descriptors, error = %d\n",
282 sc->sc_dev.dv_xname, error);
283 goto fail;
284 }
285
286 attach_stage = 5;
287
288 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
289 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
290 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
291 printf("%s: can't map download descriptors, error = %d\n",
292 sc->sc_dev.dv_xname, error);
293 goto fail;
294 }
295 bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));
296
297 attach_stage = 6;
298
299 if ((error = bus_dmamap_create(sc->sc_dmat,
300 EX_NDPD * sizeof (struct ex_dpd), 1,
301 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
302 &sc->sc_dpd_dmamap)) != 0) {
303 printf("%s: can't create download desc. DMA map, error = %d\n",
304 sc->sc_dev.dv_xname, error);
305 goto fail;
306 }
307
308 attach_stage = 7;
309
310 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
311 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
312 BUS_DMA_NOWAIT)) != 0) {
313 printf("%s: can't load download desc. DMA map, error = %d\n",
314 sc->sc_dev.dv_xname, error);
315 goto fail;
316 }
317
318 attach_stage = 8;
319
320
321 /*
322 * Create the transmit buffer DMA maps.
323 */
324 for (i = 0; i < EX_NDPD; i++) {
325 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
326 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
327 &sc->sc_tx_dmamaps[i])) != 0) {
328 printf("%s: can't create tx DMA map %d, error = %d\n",
329 sc->sc_dev.dv_xname, i, error);
330 goto fail;
331 }
332 }
333
334 attach_stage = 9;
335
336 /*
337 * Create the receive buffer DMA maps.
338 */
339 for (i = 0; i < EX_NUPD; i++) {
340 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
341 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
342 &sc->sc_rx_dmamaps[i])) != 0) {
343 printf("%s: can't create rx DMA map %d, error = %d\n",
344 sc->sc_dev.dv_xname, i, error);
345 goto fail;
346 }
347 }
348
349 attach_stage = 10;
350
351 /*
352 * Create ring of upload descriptors, only once. The DMA engine
353 * will loop over this when receiving packets, stalling if it
354 * hits an UPD with a finished receive.
355 */
356 for (i = 0; i < EX_NUPD; i++) {
357 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
358 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
359 sc->sc_upd[i].upd_frags[0].fr_len =
360 htole32((MCLBYTES - 2) | EX_FR_LAST);
361 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
362 printf("%s: can't allocate or map rx buffers\n",
363 sc->sc_dev.dv_xname);
364 goto fail;
365 }
366 }
367
368 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
369 EX_NUPD * sizeof (struct ex_upd),
370 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
371
372 ex_init_txdescs(sc);
373
374 attach_stage = 11;
375
376
377 GO_WINDOW(3);
378 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
379 if (val & ELINK_MEDIACAP_MII)
380 sc->ex_conf |= EX_CONF_MII;
381
382 ifp = &sc->sc_ethercom.ec_if;
383
384 /*
385 * Initialize our media structures and MII info. We'll
386 * probe the MII if we discover that we have one.
387 */
388 sc->ex_mii.mii_ifp = ifp;
389 sc->ex_mii.mii_readreg = ex_mii_readreg;
390 sc->ex_mii.mii_writereg = ex_mii_writereg;
391 sc->ex_mii.mii_statchg = ex_mii_statchg;
392 ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
393 ex_media_stat);
394
395 if (sc->ex_conf & EX_CONF_MII) {
396 /*
397 * Find PHY, extract media information from it.
398 * First, select the right transceiver.
399 */
400 u_int32_t icfg;
401
402 GO_WINDOW(3);
403 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
404 icfg &= ~(CONFIG_XCVR_SEL << 16);
405 if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
406 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
407 if (val & ELINK_MEDIACAP_100BASETX)
408 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
409 if (val & ELINK_MEDIACAP_100BASEFX)
410 icfg |= ELINKMEDIA_100BASE_FX
411 << (CONFIG_XCVR_SEL_SHIFT + 16);
412 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
413
414 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
415 MII_PHY_ANY, MII_OFFSET_ANY, 0);
416 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
417 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
418 0, NULL);
419 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
420 } else {
421 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
422 }
423 } else
424 ex_probemedia(sc);
425
426 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
427 ifp->if_softc = sc;
428 ifp->if_start = ex_start;
429 ifp->if_ioctl = ex_ioctl;
430 ifp->if_watchdog = ex_watchdog;
431 ifp->if_flags =
432 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
433
434 if_attach(ifp);
435 ether_ifattach(ifp, macaddr);
436
437 GO_WINDOW(1);
438
439 sc->tx_start_thresh = 20;
440 sc->tx_succ_ok = 0;
441
442 /* TODO: set queues to 0 */
443
444 #if NBPFILTER > 0
445 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
446 sizeof(struct ether_header));
447 #endif
448
449 #if NRND > 0
450 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
451 RND_TYPE_NET, 0);
452 #endif
453
454 /* Establish callback to reset card when we reboot. */
455 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
456 return;
457
458 fail:
459 /*
460 * Free any resources we've allocated during the failed attach
461 * attempt. Do this in reverse order and fall though.
462 */
463 switch (attach_stage) {
464 case 11:
465 {
466 struct ex_rxdesc *rxd;
467
468 for (i = 0; i < EX_NUPD; i++) {
469 rxd = &sc->sc_rxdescs[i];
470 if (rxd->rx_mbhead != NULL) {
471 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
472 m_freem(rxd->rx_mbhead);
473 }
474 }
475 }
476 /* FALLTHROUGH */
477
478 case 10:
479 for (i = 0; i < EX_NUPD; i++)
480 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
481 /* FALLTHROUGH */
482
483 case 9:
484 for (i = 0; i < EX_NDPD; i++)
485 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
486 /* FALLTHROUGH */
487 case 8:
488 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
489 /* FALLTHROUGH */
490
491 case 7:
492 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
493 /* FALLTHROUGH */
494
495 case 6:
496 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
497 EX_NDPD * sizeof (struct ex_dpd));
498 /* FALLTHROUGH */
499
500 case 5:
501 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
502 break;
503
504 case 4:
505 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
506 /* FALLTHROUGH */
507
508 case 3:
509 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
510 /* FALLTHROUGH */
511
512 case 2:
513 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
514 EX_NUPD * sizeof (struct ex_upd));
515 /* FALLTHROUGH */
516
517 case 1:
518 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
519 break;
520 }
521
522 }
523
/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/* Read media configuration and capabilities from window 3. */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	/* Board-configured default medium (an ELINKMEDIA_* code). */
	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define PRINT(s)	printf("%s%s", sep, s); sep = ", "

	/* Walk the media table, registering every medium the board has. */
	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* We checked ELINK_PCI_MEDIAMASK above, so some medium matched. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
592
/*
 * Bring device up.
 */
void
ex_init(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s, i;

	s = splnet();		/* block network interrupts while configuring */

	ex_waitcmd(sc);
	ex_stop(sc);

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	GO_WINDOW(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset both the receiver and the transmitter. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Disable the early-receive threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupts we are interested in (S_MASK). */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);

	/* Ack any stale interrupts, including the board-specific hook. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	/* Enable stats and tx/rx, and point the card at the UPD ring. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);		/* kick any packets already queued */

	GO_WINDOW(1);

	splx(s);

	/* Start the periodic (one second) MII tick. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}
670
671 /*
672 * Multicast hash filter according to the 3Com spec.
673 */
674 static u_int16_t
675 ex_mchash(addr)
676 u_char *addr;
677 {
678 u_int32_t crc, carry;
679 int i, j;
680 u_char c;
681
682 /* Compute CRC for the address value. */
683 crc = 0xffffffff; /* initial value */
684
685 for (i = 0; i < 6; i++) {
686 c = addr[i];
687 for (j = 0; j < 8; j++) {
688 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
689 crc <<= 1;
690 c >>= 1;
691 if (carry)
692 crc = (crc ^ 0x04c11db6) | carry;
693 }
694 }
695
696 /* Return the filter bit position. */
697 return(crc & 0x000000ff);
698 }
699
700
/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	/* No multicast requested: program the basic filter and return. */
	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	/*
	 * Non-90xB chips have no hash filter, so receive all multicast;
	 * likewise when ALLMULTI is set.  Otherwise set one hash-filter
	 * bit per joined multicast address.
	 */
	if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			/*
			 * NOTE(review): an address *range* cannot be
			 * represented in the hash; this bails out to the
			 * plain filter without FIL_MULTICAST, which drops
			 * those groups -- should this set ALLMULTI instead?
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
741
/*
 * Drain the TX_STATUS stack and recover from any transmit errors it
 * reports (jabber, FIFO underrun, excessive collisions).
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			/*
			 * Bump the start threshold (capped at one max-size
			 * frame) until 100 consecutive packets succeed.
			 */
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			++sc->sc_ethercom.ec_if.if_collisions;
			/* The transmitter stopped; re-enable it. */
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
	}
}
785
786 int
787 ex_media_chg(ifp)
788 struct ifnet *ifp;
789 {
790 struct ex_softc *sc = ifp->if_softc;
791
792 if (ifp->if_flags & IFF_UP)
793 ex_init(sc);
794 return 0;
795 }
796
/*
 * Program the card (and PHY, if present) for the currently selected
 * media and duplex.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int config0, config1;

	/* Enable full-duplex MAC operation if the selected media has FDX. */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		/*
		 * The internal config register is accessed here as two
		 * 16-bit halves; the media bits (CONFIG_MEDIAMASK) are
		 * in the upper half, at offset +2.
		 */
		config0 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG);
		config1 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG + 2);

		config1 = config1 & ~CONFIG_MEDIAMASK;
		config1 |= (ELINKMEDIA_MII << CONFIG_MEDIAMASK_SHIFT);

		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* Non-MII: stop the transceiver before switching media. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the selected media code in the internal config register. */
	GO_WINDOW(3);
	config0 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	config1 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);

	config1 = config1 & ~CONFIG_MEDIAMASK;
	config1 |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    CONFIG_MEDIAMASK_SHIFT);

	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
}
889
/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		/* With MII, the PHY reports link status and active media. */
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		/* Otherwise derive link state from the media-type register. */
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}
915
/*
 * Start outputting on the interface.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

	/* Bail if a download chain is already queued or no DPDs are free. */
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			/* Flatten the whole chain into the new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/* Fill one fragment descriptor per DMA segment. */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		/* Flag the last fragment so the card knows where to stop. */
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		/* Request an interrupt for the last DPD in the chain. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		/* Hand the chain to the download (tx DMA) engine. */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
1088
1089
/*
 * Interrupt service routine.
 *
 * Loops reading ELINK_STATUS, acknowledging and dispatching the status
 * bits (host error, tx complete, stats overflow, download complete,
 * upload complete) until the interrupt latch reads clear.  Returns
 * non-zero if the interrupt was ours, so the caller can claim it.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore stray interrupts while the device is disabled/deactivated. */
	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

	for (;;) {
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		/* Done once no interesting bits remain and the latch is clear. */
		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & S_HOST_ERROR) {
			/* Adapter failure: full reset and reinitialize. */
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			/* Download (transmit DMA) finished: reap tx chain. */
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			/* Upload (receive DMA) finished: drain rx chain. */
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;


					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Sanity-check the reported length. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}
1285
/*
 * Handle interface ioctls: address assignment, media selection, flag
 * changes and multicast list updates.  Runs at splnet; returns 0 or an
 * errno value.  Unknown commands yield EINVAL.
 */
int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		/* Assigning an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/* Use our MAC address if none given, else adopt it. */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media changes are delegated to the ifmedia framework. */
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
1385
/*
 * Fold the adapter's statistics registers into the interface counters.
 * The frame/collision counters live in register window 6, the upper
 * bits of the byte counters in window 4.  The trailing reads exist only
 * to clear counters (presumably read-to-clear hardware) so they do not
 * raise statistics-overflow interrupts; leaves the chip in window 1.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* UPPER_FRAMES_OK holds the high bits of both frame counters. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occured at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/* High nibbles of the rx/tx byte counters live in one register. */
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}
1426
1427 void
1428 ex_printstats(sc)
1429 struct ex_softc *sc;
1430 {
1431 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1432
1433 ex_getstats(sc);
1434 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1435 "%llu\n", (unsigned long long)ifp->if_ipackets,
1436 (unsigned long long)ifp->if_opackets,
1437 (unsigned long long)ifp->if_ierrors,
1438 (unsigned long long)ifp->if_oerrors,
1439 (unsigned long long)ifp->if_ibytes,
1440 (unsigned long long)ifp->if_obytes);
1441 }
1442
1443 void
1444 ex_tick(arg)
1445 void *arg;
1446 {
1447 struct ex_softc *sc = arg;
1448 int s;
1449
1450 if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1451 return;
1452
1453 s = splnet();
1454
1455 if (sc->ex_conf & EX_CONF_MII)
1456 mii_tick(&sc->ex_mii);
1457
1458 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1459 & S_COMMAND_IN_PROGRESS))
1460 ex_getstats(sc);
1461
1462 splx(s);
1463
1464 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1465 }
1466
1467 void
1468 ex_reset(sc)
1469 struct ex_softc *sc;
1470 {
1471 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
1472 delay(400);
1473 ex_waitcmd(sc);
1474 }
1475
1476 void
1477 ex_watchdog(ifp)
1478 struct ifnet *ifp;
1479 {
1480 struct ex_softc *sc = ifp->if_softc;
1481
1482 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1483 ++sc->sc_ethercom.ec_if.if_oerrors;
1484
1485 ex_reset(sc);
1486 ex_init(sc);
1487 }
1488
/*
 * Stop the interface: disable rx/tx, free all mbufs held by the
 * transmit chain, reinitialize the tx free list, and rebuild the
 * receive chain with fresh (or recycled) buffers.  Leaves the chip
 * quiescent with IFF_RUNNING/IFF_OACTIVE cleared.
 */
void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Release every mbuf still queued for transmission. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		/* Neutralize the download descriptor and push it to memory. */
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain; ex_add_rxbuf() recycles on failure. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1539
1540 static void
1541 ex_init_txdescs(sc)
1542 struct ex_softc *sc;
1543 {
1544 int i;
1545
1546 for (i = 0; i < EX_NDPD; i++) {
1547 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1548 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1549 if (i < EX_NDPD - 1)
1550 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1551 else
1552 sc->sc_txdescs[i].tx_next = NULL;
1553 }
1554 sc->tx_free = &sc->sc_txdescs[0];
1555 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1556 }
1557
1558
1559 int
1560 ex_activate(self, act)
1561 struct device *self;
1562 enum devact act;
1563 {
1564 struct ex_softc *sc = (void *) self;
1565 int s, error = 0;
1566
1567 s = splnet();
1568 switch (act) {
1569 case DVACT_ACTIVATE:
1570 error = EOPNOTSUPP;
1571 break;
1572
1573 case DVACT_DEACTIVATE:
1574 if (sc->ex_conf & EX_CONF_MII)
1575 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1576 MII_OFFSET_ANY);
1577 if_deactivate(&sc->sc_ethercom.ec_if);
1578 break;
1579 }
1580 splx(s);
1581
1582 return (error);
1583 }
1584
/*
 * Detach the device: stop the tick callout, detach PHYs, media, bpf
 * and the interface itself, then release every rx mbuf, all DMA maps,
 * and the DPD/UPD descriptor memory.  Always returns 0.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still held by the receive chain. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	/* Tear down the per-packet DMA maps, then the descriptor areas. */
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);

	return (0);
}
1640
1641 /*
1642 * Before reboots, reset card completely.
1643 */
static void
ex_shutdown(arg)
	void *arg;
{
	struct ex_softc *sc = arg;

	/* Quiesce the adapter so it does not DMA while rebooting. */
	ex_stop(sc);
}
1652
1653 /*
1654 * Read EEPROM data.
1655 * XXX what to do if EEPROM doesn't unbusy?
1656 */
1657 u_int16_t
1658 ex_read_eeprom(sc, offset)
1659 struct ex_softc *sc;
1660 int offset;
1661 {
1662 bus_space_tag_t iot = sc->sc_iot;
1663 bus_space_handle_t ioh = sc->sc_ioh;
1664 u_int16_t data = 0;
1665
1666 GO_WINDOW(0);
1667 if (ex_eeprom_busy(sc))
1668 goto out;
1669 switch (sc->ex_bustype) {
1670 case EX_BUS_PCI:
1671 bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1672 READ_EEPROM | (offset & 0x3f));
1673 break;
1674 case EX_BUS_CARDBUS:
1675 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1676 0x230 + (offset & 0x3f));
1677 break;
1678 }
1679 if (ex_eeprom_busy(sc))
1680 goto out;
1681 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1682 out:
1683 return data;
1684 }
1685
1686 static int
1687 ex_eeprom_busy(sc)
1688 struct ex_softc *sc;
1689 {
1690 bus_space_tag_t iot = sc->sc_iot;
1691 bus_space_handle_t ioh = sc->sc_ioh;
1692 int i = 100;
1693
1694 while (i--) {
1695 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1696 EEPROM_BUSY))
1697 return 0;
1698 delay(100);
1699 }
1700 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1701 return (1);
1702 }
1703
1704 /*
1705 * Create a new rx buffer and add it to the 'soft' rx list.
1706 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	/*
	 * Try to get a fresh cluster mbuf.  If either the mbuf or the
	 * cluster allocation fails, fall back to recycling the old
	 * buffer (rval = 1 tells the caller its data is being reused);
	 * if there is no old buffer either, give up and return 1.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			/* Reset the data pointer to the cluster start. */
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	/* Point the upload descriptor at the (offset) buffer. */
	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail's UPD to ours and flush it. */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Make the buffer and its UPD visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1787
/*
 * MII bit-bang glue: read the physical-management register.
 */
u_int32_t
ex_mii_bitbang_read(self)
	struct device *self;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}
1797
/*
 * MII bit-bang glue: write the physical-management register.
 */
void
ex_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}
1808
1809 int
1810 ex_mii_readreg(v, phy, reg)
1811 struct device *v;
1812 int phy, reg;
1813 {
1814 struct ex_softc *sc = (struct ex_softc *)v;
1815 int val;
1816
1817 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1818 return 0;
1819
1820 GO_WINDOW(4);
1821
1822 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1823
1824 GO_WINDOW(1);
1825
1826 return (val);
1827 }
1828
/*
 * Write a PHY register via the MII bit-bang interface.  The PHYSMGMT
 * register is in window 4; restore window 1 when done.
 */
void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	/* `sc' is referenced by the GO_WINDOW() macro. */
	struct ex_softc *sc = (struct ex_softc *)v;

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}
1844
1845 void
1846 ex_mii_statchg(v)
1847 struct device *v;
1848 {
1849 struct ex_softc *sc = (struct ex_softc *)v;
1850 bus_space_tag_t iot = sc->sc_iot;
1851 bus_space_handle_t ioh = sc->sc_ioh;
1852 int mctl;
1853
1854 GO_WINDOW(3);
1855 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1856 if (sc->ex_mii.mii_media_active & IFM_FDX)
1857 mctl |= MAC_CONTROL_FDX;
1858 else
1859 mctl &= ~MAC_CONTROL_FDX;
1860 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1861 GO_WINDOW(1); /* back to operating window */
1862 }
1863