elinkxl.c revision 1.34.2.1 1 /* $NetBSD: elinkxl.c,v 1.34.2.1 2000/09/01 00:54:06 haya Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42 #include "rnd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/kernel.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/ioctl.h>
51 #include <sys/errno.h>
52 #include <sys/syslog.h>
53 #include <sys/select.h>
54 #include <sys/device.h>
55 #if NRND > 0
56 #include <sys/rnd.h>
57 #endif
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_ether.h>
62 #include <net/if_media.h>
63
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #include <netinet/if_inarp.h>
70 #endif
71
72 #ifdef NS
73 #include <netns/ns.h>
74 #include <netns/ns_if.h>
75 #endif
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #include <net/bpfdesc.h>
80 #endif
81
82 #include <machine/cpu.h>
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85 #include <machine/endian.h>
86
87 #include <vm/vm.h>
88 #include <vm/pmap.h>
89
90 #include <dev/mii/miivar.h>
91 #include <dev/mii/mii.h>
92 #include <dev/mii/mii_bitbang.h>
93
94 #include <dev/ic/elink3reg.h>
95 /* #include <dev/ic/elink3var.h> */
96 #include <dev/ic/elinkxlreg.h>
97 #include <dev/ic/elinkxlvar.h>
98
99 #ifdef DEBUG
100 int exdebug = 0;
101 #endif
102
/* ifmedia callbacks */
int ex_media_chg __P((struct ifnet *ifp));
void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));

/* Back-end entry points shared with the bus front-ends. */
void ex_probe_media __P((struct ex_softc *));
void ex_set_filter __P((struct ex_softc *));
void ex_set_media __P((struct ex_softc *));
struct mbuf *ex_get __P((struct ex_softc *, int));
u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
void ex_init __P((struct ex_softc *));
void ex_read __P((struct ex_softc *));
void ex_reset __P((struct ex_softc *));
void ex_set_mc __P((struct ex_softc *));
void ex_getstats __P((struct ex_softc *));
void ex_printstats __P((struct ex_softc *));
void ex_tick __P((void *));

/* Helpers private to this file. */
static int ex_eeprom_busy __P((struct ex_softc *));
static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
static void ex_init_txdescs __P((struct ex_softc *));

/* Shutdown hook, if_start handler and TX status processing. */
static void ex_shutdown __P((void *));
static void ex_start __P((struct ifnet *));
static void ex_txstat __P((struct ex_softc *));

/* MII (PHY) register access and link-change notification glue. */
int ex_mii_readreg __P((struct device *, int, int));
void ex_mii_writereg __P((struct device *, int, int, int));
void ex_mii_statchg __P((struct device *));

void ex_probemedia __P((struct ex_softc *));
133
/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int exm_mpbit;		/* media present bit */
	const char *exm_name;	/* name of medium */
	int exm_ifmedia;	/* ifmedia word for medium */
	int exm_epmedia;	/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips. Note that chips with MII have no
 * `native' media.
 *
 * Entries sharing one media-present bit (10baseT and 100baseTX) list
 * both the half- and full-duplex ifmedia words; the table is terminated
 * by the all-zero sentinel entry (exm_name == NULL).
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T, "10baseT", IFM_ETHER|IFM_10_T,
	    ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T, "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
	    ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI, "10base5", IFM_ETHER|IFM_10_5,
	    ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC, "10base2", IFM_ETHER|IFM_10_2,
	    ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX,
	    ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	    ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX,
	    ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual", IFM_ETHER|IFM_MANUAL,
	    ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4,
	    ELINKMEDIA_100BASE_T4 },
	{ 0, NULL, 0,
	    0 },
};
171
/*
 * MII bit-bang glue.
 */
u_int32_t ex_mii_bitbang_read __P((struct device *));
void ex_mii_bitbang_write __P((struct device *, u_int32_t));

/*
 * Operations vector handed to the generic mii_bitbang code: the two
 * accessors above plus the register bits used to wiggle the MII
 * management interface.  MDO and MDI share the same data bit
 * (ELINK_PHY_DATA); direction is controlled by ELINK_PHY_DIR, and
 * no bit needs to be set for the PHY-to-host direction.
 */
const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
189
190 /*
191 * Back-end attach and configure.
192 */
193 void
194 ex_config(sc)
195 struct ex_softc *sc;
196 {
197 struct ifnet *ifp;
198 u_int16_t val;
199 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
200 bus_space_tag_t iot = sc->sc_iot;
201 bus_space_handle_t ioh = sc->sc_ioh;
202 int i, error, attach_stage;
203
204 callout_init(&sc->ex_mii_callout);
205
206 ex_reset(sc);
207
208 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
209 macaddr[0] = val >> 8;
210 macaddr[1] = val & 0xff;
211 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
212 macaddr[2] = val >> 8;
213 macaddr[3] = val & 0xff;
214 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
215 macaddr[4] = val >> 8;
216 macaddr[5] = val & 0xff;
217
218 printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
219 ether_sprintf(macaddr));
220
221 if (sc->intr_ack != NULL) { /* CardBus card specific */
222 GO_WINDOW(2);
223 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) {
224 bus_space_write_2(sc->sc_iot, ioh, 12,
225 0x10|bus_space_read_2(sc->sc_iot, ioh, 12));
226 }
227 if (sc->ex_conf & EX_CONF_PHY_POWER) {
228 bus_space_write_2(sc->sc_iot, ioh, 12,
229 0x4000|bus_space_read_2(sc->sc_iot, ioh, 12));
230 }
231 }
232
233 attach_stage = 0;
234
235 /*
236 * Allocate the upload descriptors, and create and load the DMA
237 * map for them.
238 */
239 if ((error = bus_dmamem_alloc(sc->sc_dmat,
240 EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
241 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
242 printf("%s: can't allocate upload descriptors, error = %d\n",
243 sc->sc_dev.dv_xname, error);
244 goto fail;
245 }
246
247 attach_stage = 1;
248
249 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
250 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
251 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
252 printf("%s: can't map upload descriptors, error = %d\n",
253 sc->sc_dev.dv_xname, error);
254 goto fail;
255 }
256
257 attach_stage = 2;
258
259 if ((error = bus_dmamap_create(sc->sc_dmat,
260 EX_NUPD * sizeof (struct ex_upd), 1,
261 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
262 &sc->sc_upd_dmamap)) != 0) {
263 printf("%s: can't create upload desc. DMA map, error = %d\n",
264 sc->sc_dev.dv_xname, error);
265 goto fail;
266 }
267
268 attach_stage = 3;
269
270 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
271 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
272 BUS_DMA_NOWAIT)) != 0) {
273 printf("%s: can't load upload desc. DMA map, error = %d\n",
274 sc->sc_dev.dv_xname, error);
275 goto fail;
276 }
277
278 attach_stage = 4;
279
280 /*
281 * Allocate the download descriptors, and create and load the DMA
282 * map for them.
283 */
284 if ((error = bus_dmamem_alloc(sc->sc_dmat,
285 EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
286 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
287 printf("%s: can't allocate download descriptors, error = %d\n",
288 sc->sc_dev.dv_xname, error);
289 goto fail;
290 }
291
292 attach_stage = 5;
293
294 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
295 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
296 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
297 printf("%s: can't map download descriptors, error = %d\n",
298 sc->sc_dev.dv_xname, error);
299 goto fail;
300 }
301 bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));
302
303 attach_stage = 6;
304
305 if ((error = bus_dmamap_create(sc->sc_dmat,
306 EX_NDPD * sizeof (struct ex_dpd), 1,
307 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
308 &sc->sc_dpd_dmamap)) != 0) {
309 printf("%s: can't create download desc. DMA map, error = %d\n",
310 sc->sc_dev.dv_xname, error);
311 goto fail;
312 }
313
314 attach_stage = 7;
315
316 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
317 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
318 BUS_DMA_NOWAIT)) != 0) {
319 printf("%s: can't load download desc. DMA map, error = %d\n",
320 sc->sc_dev.dv_xname, error);
321 goto fail;
322 }
323
324 attach_stage = 8;
325
326
327 /*
328 * Create the transmit buffer DMA maps.
329 */
330 for (i = 0; i < EX_NDPD; i++) {
331 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
332 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
333 &sc->sc_tx_dmamaps[i])) != 0) {
334 printf("%s: can't create tx DMA map %d, error = %d\n",
335 sc->sc_dev.dv_xname, i, error);
336 goto fail;
337 }
338 }
339
340 attach_stage = 9;
341
342 /*
343 * Create the receive buffer DMA maps.
344 */
345 for (i = 0; i < EX_NUPD; i++) {
346 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
347 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
348 &sc->sc_rx_dmamaps[i])) != 0) {
349 printf("%s: can't create rx DMA map %d, error = %d\n",
350 sc->sc_dev.dv_xname, i, error);
351 goto fail;
352 }
353 }
354
355 attach_stage = 10;
356
357 /*
358 * Create ring of upload descriptors, only once. The DMA engine
359 * will loop over this when receiving packets, stalling if it
360 * hits an UPD with a finished receive.
361 */
362 for (i = 0; i < EX_NUPD; i++) {
363 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
364 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
365 sc->sc_upd[i].upd_frags[0].fr_len =
366 htole32((MCLBYTES - 2) | EX_FR_LAST);
367 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
368 printf("%s: can't allocate or map rx buffers\n",
369 sc->sc_dev.dv_xname);
370 goto fail;
371 }
372 }
373
374 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
375 EX_NUPD * sizeof (struct ex_upd),
376 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
377
378 ex_init_txdescs(sc);
379
380 attach_stage = 11;
381
382
383 GO_WINDOW(3);
384 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
385 if (val & ELINK_MEDIACAP_MII)
386 sc->ex_conf |= EX_CONF_MII;
387
388 ifp = &sc->sc_ethercom.ec_if;
389
390 /*
391 * Initialize our media structures and MII info. We'll
392 * probe the MII if we discover that we have one.
393 */
394 sc->ex_mii.mii_ifp = ifp;
395 sc->ex_mii.mii_readreg = ex_mii_readreg;
396 sc->ex_mii.mii_writereg = ex_mii_writereg;
397 sc->ex_mii.mii_statchg = ex_mii_statchg;
398 ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
399 ex_media_stat);
400
401 if (sc->ex_conf & EX_CONF_MII) {
402 /*
403 * Find PHY, extract media information from it.
404 * First, select the right transceiver.
405 */
406 u_int32_t icfg;
407
408 GO_WINDOW(3);
409 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
410 icfg &= ~(CONFIG_XCVR_SEL << 16);
411 if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
412 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
413 if (val & ELINK_MEDIACAP_100BASETX)
414 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
415 if (val & ELINK_MEDIACAP_100BASEFX)
416 icfg |= ELINKMEDIA_100BASE_FX
417 << (CONFIG_XCVR_SEL_SHIFT + 16);
418 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
419
420 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
421 MII_PHY_ANY, MII_OFFSET_ANY, 0);
422 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
423 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
424 0, NULL);
425 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
426 } else {
427 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
428 }
429 } else
430 ex_probemedia(sc);
431
432 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
433 ifp->if_softc = sc;
434 ifp->if_start = ex_start;
435 ifp->if_ioctl = ex_ioctl;
436 ifp->if_watchdog = ex_watchdog;
437 ifp->if_flags =
438 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
439
440 if_attach(ifp);
441 ether_ifattach(ifp, macaddr);
442
443 GO_WINDOW(1);
444
445 sc->tx_start_thresh = 20;
446 sc->tx_succ_ok = 0;
447
448 /* TODO: set queues to 0 */
449
450 #if NBPFILTER > 0
451 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
452 sizeof(struct ether_header));
453 #endif
454
455 #if NRND > 0
456 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
457 RND_TYPE_NET, 0);
458 #endif
459
460 /* Establish callback to reset card when we reboot. */
461 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
462
463 /* The attach is successful. */
464 sc->ex_flags |= EX_FLAGS_ATTACHED;
465 return;
466
467 fail:
468 /*
469 * Free any resources we've allocated during the failed attach
470 * attempt. Do this in reverse order and fall though.
471 */
472 switch (attach_stage) {
473 case 11:
474 {
475 struct ex_rxdesc *rxd;
476
477 for (i = 0; i < EX_NUPD; i++) {
478 rxd = &sc->sc_rxdescs[i];
479 if (rxd->rx_mbhead != NULL) {
480 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
481 m_freem(rxd->rx_mbhead);
482 }
483 }
484 }
485 /* FALLTHROUGH */
486
487 case 10:
488 for (i = 0; i < EX_NUPD; i++)
489 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
490 /* FALLTHROUGH */
491
492 case 9:
493 for (i = 0; i < EX_NDPD; i++)
494 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
495 /* FALLTHROUGH */
496 case 8:
497 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
498 /* FALLTHROUGH */
499
500 case 7:
501 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
502 /* FALLTHROUGH */
503
504 case 6:
505 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
506 EX_NDPD * sizeof (struct ex_dpd));
507 /* FALLTHROUGH */
508
509 case 5:
510 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
511 break;
512
513 case 4:
514 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
515 /* FALLTHROUGH */
516
517 case 3:
518 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
519 /* FALLTHROUGH */
520
521 case 2:
522 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
523 EX_NUPD * sizeof (struct ex_upd));
524 /* FALLTHROUGH */
525
526 case 1:
527 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
528 break;
529 }
530
531 }
532
533 /*
534 * Find the media present on non-MII chips.
535 */
/*
 * Find the media present on non-MII chips.
 *
 * Reads the media-present bits from the window-3 RESET_OPTIONS
 * register and the default-media field from INTERNAL_CONFIG, adds a
 * corresponding ifmedia entry for each medium found (table-driven via
 * ex_native_media[]), prints the list, and selects a default medium.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	/* Default media field lives in the upper half of INTERNAL_CONFIG. */
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	/* Print a medium name, comma-separating subsequent ones. */
#define PRINT(s) printf("%s%s", sep, s); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				/* Prefer the half-duplex variant. */
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				/* First medium seen is the fallback default. */
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* A medium was present, so a default must have been chosen. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
601
602 /*
603 * Bring device up.
604 */
605 void
606 ex_init(sc)
607 struct ex_softc *sc;
608 {
609 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
610 bus_space_tag_t iot = sc->sc_iot;
611 bus_space_handle_t ioh = sc->sc_ioh;
612 int s, i;
613
614 s = splnet();
615
616 ex_waitcmd(sc);
617 ex_stop(sc);
618
619 /*
620 * Set the station address and clear the station mask. The latter
621 * is needed for 90x cards, 0 is the default for 90xB cards.
622 */
623 GO_WINDOW(2);
624 for (i = 0; i < ETHER_ADDR_LEN; i++) {
625 bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
626 LLADDR(ifp->if_sadl)[i]);
627 bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
628 }
629
630 GO_WINDOW(3);
631
632 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
633 ex_waitcmd(sc);
634 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
635 ex_waitcmd(sc);
636
637 /*
638 * Disable reclaim threshold for 90xB, set free threshold to
639 * 6 * 256 = 1536 for 90x.
640 */
641 if (sc->ex_conf & EX_CONF_90XB)
642 bus_space_write_2(iot, ioh, ELINK_COMMAND,
643 ELINK_TXRECLTHRESH | 255);
644 else
645 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
646
647 bus_space_write_2(iot, ioh, ELINK_COMMAND,
648 SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);
649
650 bus_space_write_4(iot, ioh, ELINK_DMACTRL,
651 bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);
652
653 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
654 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);
655
656 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
657 if (sc->intr_ack)
658 (* sc->intr_ack)(sc);
659 ex_set_media(sc);
660 ex_set_mc(sc);
661
662
663 bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
664 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
665 bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
666 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
667 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);
668
669 if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
670 u_int16_t cbcard_config;
671
672 GO_WINDOW(2);
673 cbcard_config = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 0x0c);
674 if (sc->ex_conf & EX_CONF_PHY_POWER) {
675 cbcard_config |= 0x4000; /* turn on PHY power */
676 }
677 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) {
678 cbcard_config |= 0x0020; /* invert LED polarity */
679 }
680 bus_space_write_2(sc->sc_iot, sc->sc_ioh, 0x0c, cbcard_config);
681
682 GO_WINDOW(3);
683 }
684
685 ifp->if_flags |= IFF_RUNNING;
686 ifp->if_flags &= ~IFF_OACTIVE;
687 ex_start(ifp);
688
689 GO_WINDOW(1);
690
691 splx(s);
692
693 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
694 }
695
/* Hash an ethernet address into the 256-bit multicast hash filter. */
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & 0xff)

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 *
 * On 90xB parts with a specific (non-ALLMULTI) multicast list, program
 * the hash filter bit-by-bit; otherwise fall back to receiving all
 * multicast.  An address range in the list (addrlo != addrhi) cannot
 * be expressed in the hash, so it also forces the all-multicast path
 * via the early `goto out' (mask then lacks FIL_MULTIHASH).
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			/* A range entry can't be hashed; bail out. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
738
739
/*
 * Process transmit-completion status.
 *
 * Drains TX_STATUS entries one at a time, counting errors and
 * recovering from jabber/underrun (by reinitializing) and from
 * excessive collisions (by re-enabling the transmitter).  The FIFO
 * underrun path adaptively raises tx_start_thresh until 100
 * consecutive packets go out cleanly.
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
		/* Writing any value pops this status entry. */
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			/* Bump the start threshold, capped at max frame size. */
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			++sc->sc_ethercom.ec_if.if_collisions;
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			/* Clean completion; saturating success counter. */
			sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
	}
}
782
783 int
784 ex_media_chg(ifp)
785 struct ifnet *ifp;
786 {
787 struct ex_softc *sc = ifp->if_softc;
788
789 if (ifp->if_flags & IFF_UP)
790 ex_init(sc);
791 return 0;
792 }
793
/*
 * Program the chip for the currently selected medium.
 *
 * Sets full/half duplex in the MAC control register, then either
 * selects the MII transceiver and defers to the PHY (MII case), or
 * stops the transceiver, enables the chosen native medium in the
 * window-4 media-type register, and writes the transceiver selection
 * into INTERNAL_CONFIG.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	/*
	 * FDX comes from the active MII media word when we have a PHY,
	 * from the configured ifmedia word otherwise.
	 * NOTE(review): no GO_WINDOW(3) precedes this W3 register access;
	 * callers appear to leave window 3 selected — verify.
	 */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

		configreg &= ~(CONFIG_MEDIAMASK << 16);
		configreg |= (ELINKMEDIA_MII << (CONFIG_MEDIAMASK_SHIFT + 16));

		bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* No MII: quiesce the transceiver before switching media. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the transceiver selection in INTERNAL_CONFIG. */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}
880
881 /*
882 * Get currently-selected media from card.
883 * (if_media callback, may be called before interface is brought up).
884 */
/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 *
 * With a PHY, poll the MII for status/active media.  Otherwise report
 * the configured native medium and derive link state from the
 * link-beat bit in the window-4 media-type register.
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}
906
907
908
909 /*
910 * Start outputting on the interface.
911 */
/*
 * Start outputting on the interface.
 *
 * Dequeues packets from the send queue, DMA-loads each mbuf chain into
 * a free download descriptor (DPD), links the DPDs into the "soft" tx
 * list and the physical next-pointer chain, and finally hands the head
 * of the chain to the download engine.  If a chain has too many
 * fragments (EFBIG) it is first compacted into a single mbuf/cluster.
 * Returns immediately if a transmission is already in flight
 * (sc->tx_head != NULL) or no descriptors are free.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

	/* Already transmitting, or nothing free: nothing to do. */
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			/*
			 * NOTE(review): assumes the packet fits in a
			 * cluster (len <= MCLBYTES); no explicit check.
			 */
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/* Fill one fragment header per DMA segment. */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		/* Mark the final fragment so the chip knows where to stop. */
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;	/* end of chain for now */
		dpd->dpd_fsh = htole32(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		/* Ask for a download-complete interrupt on the last DPD. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
1079
1080
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;		/* becomes 1 if any interrupt was ours */
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore stray interrupts while disabled or deactivated. */
	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

	/*
	 * Loop, servicing and acknowledging interrupt causes, until the
	 * status register shows no pending cause in S_MASK and the
	 * interrupt latch has cleared.
	 */
	for (;;) {
		/* Clear the interrupt latch before sampling status. */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		/* Some attachments (e.g. CardBus) need an extra ack hook. */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		/* Catastrophic adapter error: full reset and reinit. */
		if (stat & S_HOST_ERROR) {
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			/*
			 * Download (transmit DMA) complete: reclaim all
			 * finished DPDs and their mbuf chains.
			 */
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;	/* remember last descriptor */
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			/*
			 * Upload (receive DMA) complete: drain all UPDs
			 * that the card has marked complete.
			 */
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			/* Descriptors are little-endian in host memory. */
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;


					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					/* Runt frames are dropped silently. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
							LLADDR(ifp->if_sadl),
							sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
				   & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	/* If we serviced anything, restart output if there is queued work. */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}
1276
/*
 * Handle interface ioctls: address assignment, media, flags, and
 * multicast list maintenance.  Runs at splnet.
 */
int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		/* Setting an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: either take our MAC as the host part, or
			 * program the given host address into the card.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
					LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media changes are delegated to the ifmedia framework. */
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
1376
/*
 * Harvest the on-card statistics counters into the ifnet counters.
 * The counters are clear-on-read, so simply reading them also
 * prevents statistics-overflow interrupts.  Leaves window 1 selected.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* Upper bits of the rx/tx frame counters are packed in one register. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occurred at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/* Upper nibbles of the byte counters live in window 4. */
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}
1417
1418 void
1419 ex_printstats(sc)
1420 struct ex_softc *sc;
1421 {
1422 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1423
1424 ex_getstats(sc);
1425 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1426 "%llu\n", (unsigned long long)ifp->if_ipackets,
1427 (unsigned long long)ifp->if_opackets,
1428 (unsigned long long)ifp->if_ierrors,
1429 (unsigned long long)ifp->if_oerrors,
1430 (unsigned long long)ifp->if_ibytes,
1431 (unsigned long long)ifp->if_obytes);
1432 }
1433
1434 void
1435 ex_tick(arg)
1436 void *arg;
1437 {
1438 struct ex_softc *sc = arg;
1439 int s;
1440
1441 if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1442 return;
1443
1444 s = splnet();
1445
1446 if (sc->ex_conf & EX_CONF_MII)
1447 mii_tick(&sc->ex_mii);
1448
1449 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1450 & S_COMMAND_IN_PROGRESS))
1451 ex_getstats(sc);
1452
1453 splx(s);
1454
1455 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1456 }
1457
1458 void
1459 ex_reset(sc)
1460 struct ex_softc *sc;
1461 {
1462 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
1463 delay(400);
1464 ex_waitcmd(sc);
1465 }
1466
1467 void
1468 ex_watchdog(ifp)
1469 struct ifnet *ifp;
1470 {
1471 struct ex_softc *sc = ifp->if_softc;
1472
1473 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1474 ++sc->sc_ethercom.ec_if.if_oerrors;
1475
1476 ex_reset(sc);
1477 ex_init(sc);
1478 }
1479
/*
 * Stop the interface: disable rx/tx, free all queued transmit mbufs,
 * reset the descriptor rings, and cancel the tick callout.  The
 * receive buffers themselves are kept (re-added via ex_add_rxbuf).
 */
void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the hardware before touching the rings. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Free any pending transmit chains and clear their DPDs. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	/* Rebuild the transmit free list from scratch. */
	ex_init_txdescs(sc);

	/* Rebuild the receive chain, recycling existing mbufs. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;	/* disarm the watchdog */
}
1530
1531 static void
1532 ex_init_txdescs(sc)
1533 struct ex_softc *sc;
1534 {
1535 int i;
1536
1537 for (i = 0; i < EX_NDPD; i++) {
1538 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1539 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1540 if (i < EX_NDPD - 1)
1541 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1542 else
1543 sc->sc_txdescs[i].tx_next = NULL;
1544 }
1545 sc->tx_free = &sc->sc_txdescs[0];
1546 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1547 }
1548
1549
1550 int
1551 ex_activate(self, act)
1552 struct device *self;
1553 enum devact act;
1554 {
1555 struct ex_softc *sc = (void *) self;
1556 int s, error = 0;
1557
1558 s = splnet();
1559 switch (act) {
1560 case DVACT_ACTIVATE:
1561 error = EOPNOTSUPP;
1562 break;
1563
1564 case DVACT_DEACTIVATE:
1565 if (sc->ex_conf & EX_CONF_MII)
1566 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1567 MII_OFFSET_ANY);
1568 if_deactivate(&sc->sc_ethercom.ec_if);
1569 break;
1570 }
1571 splx(s);
1572
1573 return (error);
1574 }
1575
/*
 * Detach the driver instance: tear down MII, media, bpf, the ifnet,
 * and release all DMA resources in reverse order of allocation.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still held by the receive descriptors. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	/* Destroy the per-buffer DMA maps. */
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	/* Release the DPD (download/tx) descriptor memory. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	/* Release the UPD (upload/rx) descriptor memory. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);

	return (0);
}
1635
1636 /*
1637 * Before reboots, reset card completely.
1638 */
/*
 * Before reboots, reset card completely.
 */
static void
ex_shutdown(arg)
	void *arg;
{

	/* Quiesce the hardware so the boot loader sees a clean card. */
	ex_stop((struct ex_softc *)arg);
}
1647
1648 /*
1649 * Read EEPROM data.
1650 * XXX what to do if EEPROM doesn't unbusy?
1651 */
1652 u_int16_t
1653 ex_read_eeprom(sc, offset)
1654 struct ex_softc *sc;
1655 int offset;
1656 {
1657 bus_space_tag_t iot = sc->sc_iot;
1658 bus_space_handle_t ioh = sc->sc_ioh;
1659 u_int16_t data = 0;
1660
1661 GO_WINDOW(0);
1662 if (ex_eeprom_busy(sc))
1663 goto out;
1664 switch (sc->ex_bustype) {
1665 case EX_BUS_PCI:
1666 bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1667 READ_EEPROM | (offset & 0x3f));
1668 break;
1669 case EX_BUS_CARDBUS:
1670 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1671 0x230 + (offset & 0x3f));
1672 break;
1673 }
1674 if (ex_eeprom_busy(sc))
1675 goto out;
1676 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1677 out:
1678 return data;
1679 }
1680
1681 static int
1682 ex_eeprom_busy(sc)
1683 struct ex_softc *sc;
1684 {
1685 bus_space_tag_t iot = sc->sc_iot;
1686 bus_space_handle_t ioh = sc->sc_ioh;
1687 int i = 100;
1688
1689 while (i--) {
1690 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1691 EEPROM_BUSY))
1692 return 0;
1693 delay(100);
1694 }
1695 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1696 return (1);
1697 }
1698
1699 /*
1700 * Create a new rx buffer and add it to the 'soft' rx list.
1701 */
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 * Returns 0 when a fresh mbuf cluster was attached, 1 when allocation
 * failed and the old buffer was recycled (or, if there was no old
 * buffer, when nothing could be queued at all).
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster available: recycle the old buffer. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		/* No mbuf header available: recycle the old buffer. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	/* Point the UPD at the (offset) buffer and mark it empty. */
	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail UPD to this one (DMA address). */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Hand the buffer and descriptor over to the hardware. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1782
1783 u_int32_t
1784 ex_mii_bitbang_read(self)
1785 struct device *self;
1786 {
1787 struct ex_softc *sc = (void *) self;
1788
1789 /* We're already in Window 4. */
1790 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1791 }
1792
1793 void
1794 ex_mii_bitbang_write(self, val)
1795 struct device *self;
1796 u_int32_t val;
1797 {
1798 struct ex_softc *sc = (void *) self;
1799
1800 /* We're already in Window 4. */
1801 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1802 }
1803
1804 int
1805 ex_mii_readreg(v, phy, reg)
1806 struct device *v;
1807 int phy, reg;
1808 {
1809 struct ex_softc *sc = (struct ex_softc *)v;
1810 int val;
1811
1812 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1813 return 0;
1814
1815 GO_WINDOW(4);
1816
1817 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1818
1819 GO_WINDOW(1);
1820
1821 return (val);
1822 }
1823
/*
 * Write a PHY register via the bit-bang interface.
 * Leaves window 1 selected.
 */
void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	/* sc is referenced implicitly by the GO_WINDOW() macro. */
	struct ex_softc *sc = (struct ex_softc *)v;

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}
1839
1840 void
1841 ex_mii_statchg(v)
1842 struct device *v;
1843 {
1844 struct ex_softc *sc = (struct ex_softc *)v;
1845 bus_space_tag_t iot = sc->sc_iot;
1846 bus_space_handle_t ioh = sc->sc_ioh;
1847 int mctl;
1848
1849 GO_WINDOW(3);
1850 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1851 if (sc->ex_mii.mii_media_active & IFM_FDX)
1852 mctl |= MAC_CONTROL_FDX;
1853 else
1854 mctl &= ~MAC_CONTROL_FDX;
1855 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1856 GO_WINDOW(1); /* back to operating window */
1857 }
1858