1 /* $NetBSD: elinkxl.c,v 1.15.4.1 1999/11/15 00:40:32 fvdl Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42 #include "rnd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kernel.h>
47 #include <sys/mbuf.h>
48 #include <sys/socket.h>
49 #include <sys/ioctl.h>
50 #include <sys/errno.h>
51 #include <sys/syslog.h>
52 #include <sys/select.h>
53 #include <sys/device.h>
54 #if NRND > 0
55 #include <sys/rnd.h>
56 #endif
57
58 #include <net/if.h>
59 #include <net/if_dl.h>
60 #include <net/if_ether.h>
61 #include <net/if_media.h>
62
63 #ifdef INET
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/if_inarp.h>
69 #endif
70
71 #ifdef NS
72 #include <netns/ns.h>
73 #include <netns/ns_if.h>
74 #endif
75
76 #if NBPFILTER > 0
77 #include <net/bpf.h>
78 #include <net/bpfdesc.h>
79 #endif
80
81 #include <machine/cpu.h>
82 #include <machine/bus.h>
83 #include <machine/intr.h>
84
85 #if BYTE_ORDER == BIG_ENDIAN
86 #include <machine/bswap.h>
87 #define htopci(x) bswap32(x)
88 #define pcitoh(x) bswap32(x)
89 #else
90 #define htopci(x) (x)
91 #define pcitoh(x) (x)
92 #endif
93
94 #include <vm/vm.h>
95 #include <vm/pmap.h>
96
97 #include <dev/mii/miivar.h>
98 #include <dev/mii/mii.h>
99
100 #include <dev/ic/elink3reg.h>
101 /* #include <dev/ic/elink3var.h> */
102 #include <dev/ic/elinkxlreg.h>
103 #include <dev/ic/elinkxlvar.h>
104
105 #ifdef DEBUG
106 int exdebug = 0;
107 #endif
108
109 /* ifmedia callbacks */
110 int ex_media_chg __P((struct ifnet *ifp));
111 void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));
112
113 void ex_probe_media __P((struct ex_softc *));
114 void ex_set_filter __P((struct ex_softc *));
115 void ex_set_media __P((struct ex_softc *));
116 struct mbuf *ex_get __P((struct ex_softc *, int));
117 u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
118 void ex_init __P((struct ex_softc *));
119 void ex_read __P((struct ex_softc *));
120 void ex_reset __P((struct ex_softc *));
121 void ex_set_mc __P((struct ex_softc *));
122 void ex_getstats __P((struct ex_softc *));
123 void ex_printstats __P((struct ex_softc *));
124 void ex_tick __P((void *));
125
126 static int ex_eeprom_busy __P((struct ex_softc *));
127 static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
128 static void ex_init_txdescs __P((struct ex_softc *));
129
130 static void ex_shutdown __P((void *));
131 static void ex_start __P((struct ifnet *));
132 static void ex_txstat __P((struct ex_softc *));
133 static u_int16_t ex_mchash __P((u_char *));
134 static void ex_mii_writebits __P((struct ex_softc *, u_int, int));
135
136 void ex_mii_setbit __P((void *, u_int16_t));
137 void ex_mii_clrbit __P((void *, u_int16_t));
138 u_int16_t ex_mii_readbit __P((void *, u_int16_t));
139 int ex_mii_readreg __P((struct device *, int, int));
140 void ex_mii_writereg __P((struct device *, int, int, int));
141 void ex_mii_statchg __P((struct device *));
142
143 void ex_probemedia __P((struct ex_softc *));
144
145 /*
146 * Structure to map media-present bits in boards to ifmedia codes and
147 * printable media names. Used for table-driven ifmedia initialization.
148 */
149 struct ex_media {
150 int exm_mpbit; /* media present bit */
151 const char *exm_name; /* name of medium */
152 int exm_ifmedia; /* ifmedia word for medium */
153 int exm_epmedia; /* ELINKMEDIA_* constant */
154 };
155
156 /*
157 * Media table for 3c90x chips. Note that chips with MII have no
158 * `native' media.
159 */
/*
 * Full-duplex variants share the same media-present bit as their
 * half-duplex counterparts (e.g. both 10baseT entries test
 * ELINK_PCI_10BASE_T); ex_probemedia() relies on this when choosing
 * a default medium.
 */
160 struct ex_media ex_native_media[] = {
161 { ELINK_PCI_10BASE_T, "10baseT", IFM_ETHER|IFM_10_T,
162 ELINKMEDIA_10BASE_T },
163 { ELINK_PCI_10BASE_T, "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
164 ELINKMEDIA_10BASE_T },
165 { ELINK_PCI_AUI, "10base5", IFM_ETHER|IFM_10_5,
166 ELINKMEDIA_AUI },
167 { ELINK_PCI_BNC, "10base2", IFM_ETHER|IFM_10_2,
168 ELINKMEDIA_10BASE_2 },
169 { ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX,
170 ELINKMEDIA_100BASE_TX },
171 { ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
172 ELINKMEDIA_100BASE_TX },
173 { ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX,
174 ELINKMEDIA_100BASE_FX },
175 { ELINK_PCI_100BASE_MII,"manual", IFM_ETHER|IFM_MANUAL,
176 ELINKMEDIA_MII },
177 { ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4,
178 ELINKMEDIA_100BASE_T4 },
/* Terminator: table scans stop at the NULL name. */
179 { 0, NULL, 0,
180 0 },
181 };
182
183 /*
184 * Back-end attach and configure.
185 */
186 void
187 ex_config(sc)
188 struct ex_softc *sc;
189 {
190 struct ifnet *ifp;
191 u_int16_t val;
192 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
193 bus_space_tag_t iot = sc->sc_iot;
194 bus_space_handle_t ioh = sc->sc_ioh;
195 bus_dma_segment_t useg, dseg;
196 int urseg, drseg, i, error, attach_stage;
197
198 ex_reset(sc);
199
200 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
201 macaddr[0] = val >> 8;
202 macaddr[1] = val & 0xff;
203 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
204 macaddr[2] = val >> 8;
205 macaddr[3] = val & 0xff;
206 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
207 macaddr[4] = val >> 8;
208 macaddr[5] = val & 0xff;
209
210 printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
211 ether_sprintf(macaddr));
212
213 if (sc->intr_ack) { /* 3C575BTX specific */
214 GO_WINDOW(2);
215 bus_space_write_2(sc->sc_iot, ioh, 12, 0x10|bus_space_read_2(sc->sc_iot, ioh, 12));
216 }
217
218 attach_stage = 0;
219
220 /*
221 * Allocate the upload descriptors, and create and load the DMA
222 * map for them.
223 */
224 if ((error = bus_dmamem_alloc(sc->sc_dmat,
225 EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &useg, 1, &urseg,
226 BUS_DMA_NOWAIT)) != 0) {
227 printf("%s: can't allocate upload descriptors, error = %d\n",
228 sc->sc_dev.dv_xname, error);
229 goto fail;
230 }
231
232 attach_stage = 1;
233
234 if ((error = bus_dmamem_map(sc->sc_dmat, &useg, urseg,
235 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
236 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
237 printf("%s: can't map upload descriptors, error = %d\n",
238 sc->sc_dev.dv_xname, error);
239 goto fail;
240 }
241
242 attach_stage = 2;
243
244 if ((error = bus_dmamap_create(sc->sc_dmat,
245 EX_NUPD * sizeof (struct ex_upd), 1,
246 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
247 &sc->sc_upd_dmamap)) != 0) {
248 printf("%s: can't create upload desc. DMA map, error = %d\n",
249 sc->sc_dev.dv_xname, error);
250 goto fail;
251 }
252
253 attach_stage = 3;
254
255 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
256 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
257 BUS_DMA_NOWAIT)) != 0) {
258 printf("%s: can't load upload desc. DMA map, error = %d\n",
259 sc->sc_dev.dv_xname, error);
260 goto fail;
261 }
262
263 attach_stage = 4;
264
265 /*
266 * Allocate the download descriptors, and create and load the DMA
267 * map for them.
268 */
269 if ((error = bus_dmamem_alloc(sc->sc_dmat,
270 EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &dseg, 1, &drseg,
271 BUS_DMA_NOWAIT)) != 0) {
272 printf("%s: can't allocate download descriptors, error = %d\n",
273 sc->sc_dev.dv_xname, error);
274 goto fail;
275 }
276
277 attach_stage = 5;
278
279 if ((error = bus_dmamem_map(sc->sc_dmat, &dseg, drseg,
280 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
281 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
282 printf("%s: can't map download descriptors, error = %d\n",
283 sc->sc_dev.dv_xname, error);
284 goto fail;
285 }
286 bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));
287
288 attach_stage = 6;
289
290 if ((error = bus_dmamap_create(sc->sc_dmat,
291 EX_NDPD * sizeof (struct ex_dpd), 1,
292 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
293 &sc->sc_dpd_dmamap)) != 0) {
294 printf("%s: can't create download desc. DMA map, error = %d\n",
295 sc->sc_dev.dv_xname, error);
296 goto fail;
297 }
298
299 attach_stage = 7;
300
301 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
302 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
303 BUS_DMA_NOWAIT)) != 0) {
304 printf("%s: can't load download desc. DMA map, error = %d\n",
305 sc->sc_dev.dv_xname, error);
306 goto fail;
307 }
308
309 attach_stage = 8;
310
311
312 /*
313 * Create the transmit buffer DMA maps.
314 */
315 for (i = 0; i < EX_NDPD; i++) {
316 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
317 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
318 &sc->sc_tx_dmamaps[i])) != 0) {
319 printf("%s: can't create tx DMA map %d, error = %d\n",
320 sc->sc_dev.dv_xname, i, error);
321 goto fail;
322 }
323 }
324
325 attach_stage = 9;
326
327 /*
328 * Create the receive buffer DMA maps.
329 */
330 for (i = 0; i < EX_NUPD; i++) {
331 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
332 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
333 &sc->sc_rx_dmamaps[i])) != 0) {
334 printf("%s: can't create rx DMA map %d, error = %d\n",
335 sc->sc_dev.dv_xname, i, error);
336 goto fail;
337 }
338 }
339
340 attach_stage = 10;
341
342 /*
343 * Create ring of upload descriptors, only once. The DMA engine
344 * will loop over this when receiving packets, stalling if it
345 * hits an UPD with a finished receive.
346 */
347 for (i = 0; i < EX_NUPD; i++) {
348 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
349 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
350 sc->sc_upd[i].upd_frags[0].fr_len =
351 htopci((MCLBYTES - 2) | EX_FR_LAST);
352 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
353 printf("%s: can't allocate or map rx buffers\n",
354 sc->sc_dev.dv_xname);
355 goto fail;
356 }
357 }
358
359 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
360 EX_NUPD * sizeof (struct ex_upd),
361 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
362
363 ex_init_txdescs(sc);
364
365 attach_stage = 11;
366
367
368 GO_WINDOW(3);
369 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
370 if (val & ELINK_MEDIACAP_MII)
371 sc->ex_conf |= EX_CONF_MII;
372
373 ifp = &sc->sc_ethercom.ec_if;
374
375 /*
376 * Initialize our media structures and MII info. We'll
377 * probe the MII if we discover that we have one.
378 */
379 sc->ex_mii.mii_ifp = ifp;
380 sc->ex_mii.mii_readreg = ex_mii_readreg;
381 sc->ex_mii.mii_writereg = ex_mii_writereg;
382 sc->ex_mii.mii_statchg = ex_mii_statchg;
383 ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
384 ex_media_stat);
385
386 if (sc->ex_conf & EX_CONF_MII) {
387 /*
388 * Find PHY, extract media information from it.
389 * First, select the right transceiver.
390 */
391 u_int32_t icfg;
392
393 GO_WINDOW(3);
394 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
395 icfg &= ~(CONFIG_XCVR_SEL << 16);
396 if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
397 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
398 if (val & ELINK_MEDIACAP_100BASETX)
399 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
400 if (val & ELINK_MEDIACAP_100BASEFX)
401 icfg |= ELINKMEDIA_100BASE_FX
402 << (CONFIG_XCVR_SEL_SHIFT + 16);
403 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
404
405 mii_phy_probe(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
406 MII_PHY_ANY, MII_OFFSET_ANY);
407 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
408 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
409 0, NULL);
410 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
411 } else {
412 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
413 }
414 } else
415 ex_probemedia(sc);
416
417 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
418 ifp->if_softc = sc;
419 ifp->if_start = ex_start;
420 ifp->if_ioctl = ex_ioctl;
421 ifp->if_watchdog = ex_watchdog;
422 ifp->if_flags =
423 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
424
425 if_attach(ifp);
426 ether_ifattach(ifp, macaddr);
427
428 GO_WINDOW(1);
429
430 sc->tx_start_thresh = 20;
431 sc->tx_succ_ok = 0;
432
433 /* TODO: set queues to 0 */
434
435 #if NBPFILTER > 0
436 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
437 sizeof(struct ether_header));
438 #endif
439
440 #if NRND > 0
441 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
442 RND_TYPE_NET, 0);
443 #endif
444
445 /* Establish callback to reset card when we reboot. */
446 shutdownhook_establish(ex_shutdown, sc);
447 return;
448
449 fail:
450 /*
451 * Free any resources we've allocated during the failed attach
452 * attempt. Do this in reverse order and fall though.
453 */
454 switch (attach_stage) {
455 case 11:
456 {
457 struct ex_rxdesc *rxd;
458
459 for (i = 0; i < EX_NUPD; i++) {
460 rxd = &sc->sc_rxdescs[i];
461 if (rxd->rx_mbhead != NULL) {
462 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
463 m_freem(rxd->rx_mbhead);
464 }
465 }
466 }
467 /* FALLTHROUGH */
468
469 case 10:
470 for (i = 0; i < EX_NUPD; i++)
471 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
472 /* FALLTHROUGH */
473
474 case 9:
475 for (i = 0; i < EX_NDPD; i++)
476 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
477 /* FALLTHROUGH */
478 case 8:
479 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
480 /* FALLTHROUGH */
481
482 case 7:
483 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
484 /* FALLTHROUGH */
485
486 case 6:
487 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
488 EX_NDPD * sizeof (struct ex_dpd));
489 /* FALLTHROUGH */
490
491 case 5:
492 bus_dmamem_free(sc->sc_dmat, &dseg, drseg);
493 break;
494
495 case 4:
496 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
497 /* FALLTHROUGH */
498
499 case 3:
500 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
501 /* FALLTHROUGH */
502
503 case 2:
504 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
505 EX_NUPD * sizeof (struct ex_upd));
506 /* FALLTHROUGH */
507
508 case 1:
509 bus_dmamem_free(sc->sc_dmat, &useg, urseg);
510 break;
511 }
512
513 }
514
515 /*
516 * Find the media present on non-MII chips.
517 */
518 void
519 ex_probemedia(sc)
520 struct ex_softc *sc;
521 {
522 bus_space_tag_t iot = sc->sc_iot;
523 bus_space_handle_t ioh = sc->sc_ioh;
524 struct ifmedia *ifm = &sc->ex_mii.mii_media;
525 struct ex_media *exm;
526 u_int16_t config1, reset_options, default_media;
527 int defmedia = 0;
528 const char *sep = "", *defmedianame = NULL;
529
530 GO_WINDOW(3);
531 config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
532 reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
533 GO_WINDOW(0);
534
535 default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;
536
537 printf("%s: ", sc->sc_dev.dv_xname);
538
539 /* Sanity check that there are any media! */
540 if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
541 printf("no media present!\n");
542 ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
543 ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
544 return;
545 }
546
547 #define PRINT(s) printf("%s%s", sep, s); sep = ", "
548
549 for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
550 if (reset_options & exm->exm_mpbit) {
551 /*
552 * Default media is a little complicated. We
553 * support full-duplex which uses the same
554 * reset options bit.
555 *
556 * XXX Check EEPROM for default to FDX?
557 */
558 if (exm->exm_epmedia == default_media) {
559 if ((exm->exm_ifmedia & IFM_FDX) == 0) {
560 defmedia = exm->exm_ifmedia;
561 defmedianame = exm->exm_name;
562 }
563 } else if (defmedia == 0) {
564 defmedia = exm->exm_ifmedia;
565 defmedianame = exm->exm_name;
566 }
567 ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
568 NULL);
569 PRINT(exm->exm_name);
570 }
571 }
572
573 #undef PRINT
574
575 #ifdef DIAGNOSTIC
576 if (defmedia == 0)
577 panic("ex_probemedia: impossible");
578 #endif
579
580 printf(", default %s\n", defmedianame);
581 ifmedia_set(ifm, defmedia);
582 }
583
584 /*
585 * Bring device up.
586 */
587 void
588 ex_init(sc)
589 struct ex_softc *sc;
590 {
591 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
592 bus_space_tag_t iot = sc->sc_iot;
593 bus_space_handle_t ioh = sc->sc_ioh;
594 int s, i;
595
596 s = splnet();
597
	/* Wait for any pending command to finish, then stop the chip. */
598 ex_waitcmd(sc);
599 ex_stop(sc);
600
601 /*
602 * Set the station address and clear the station mask. The latter
603 * is needed for 90x cards, 0 is the default for 90xB cards.
604 */
605 GO_WINDOW(2);
606 for (i = 0; i < ETHER_ADDR_LEN; i++) {
607 bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
608 LLADDR(ifp->if_sadl)[i]);
609 bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
610 }
611
612 GO_WINDOW(3);
613
	/* Reset the receiver and the transmitter; each reset must be
	 * allowed to complete before issuing the next command. */
614 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
615 ex_waitcmd(sc);
616 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
617 ex_waitcmd(sc);
618
619 /*
620 * Disable reclaim threshold for 90xB, set free threshold to
621 * 6 * 256 = 1536 for 90x.
622 */
623 if (sc->ex_conf & EX_CONF_90XB)
624 bus_space_write_2(iot, ioh, ELINK_COMMAND,
625 ELINK_TXRECLTHRESH | 255);
626 else
627 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
628
	/* Disable the receive-early threshold. */
629 bus_space_write_2(iot, ioh, ELINK_COMMAND,
630 SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);
631
	/* NOTE(review): presumably enables the early-receive feature of
	 * the upload (rx DMA) engine -- confirm against the 3c90x docs. */
632 bus_space_write_4(iot, ioh, ELINK_DMACTRL,
633 bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);
634
	/* Unmask and enable the interrupt sources we service (S_MASK). */
635 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
636 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);
637
	/* Acknowledge any interrupts left pending from before the reset;
	 * the 3C575BTX needs an extra board-specific ack hook. */
638 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
639 if (sc->intr_ack)
640 (* sc->intr_ack)(sc);
641 ex_set_media(sc);
642 ex_set_mc(sc);
643
644
	/* Enable statistics, the transmitter and the receiver, point the
	 * upload engine at the rx descriptor ring and unstall it. */
645 bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
646 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
647 bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
648 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
649 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);
650
651 ifp->if_flags |= IFF_RUNNING;
652 ifp->if_flags &= ~IFF_OACTIVE;
653 ex_start(ifp);
654
655 GO_WINDOW(1);
656
657 splx(s);
658
	/* Schedule the periodic tick (statistics gathering). */
659 timeout(ex_tick, sc, hz);
660 }
661
662 /*
663 * Multicast hash filter according to the 3Com spec.
664 */
665 static u_int16_t
666 ex_mchash(addr)
667 u_char *addr;
668 {
669 u_int32_t crc, carry;
670 int i, j;
671 u_char c;
672
673 /* Compute CRC for the address value. */
674 crc = 0xffffffff; /* initial value */
675
676 for (i = 0; i < 6; i++) {
677 c = addr[i];
678 for (j = 0; j < 8; j++) {
679 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
680 crc <<= 1;
681 c >>= 1;
682 if (carry)
683 crc = (crc ^ 0x04c11db6) | carry;
684 }
685 }
686
687 /* Return the filter bit position. */
688 return(crc & 0x000000ff);
689 }
690
691
692 /*
693 * Set multicast receive filter. Also take care of promiscuous mode
694 * here (XXX).
695 */
696 void
697 ex_set_mc(sc)
698 register struct ex_softc *sc;
699 {
700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
701 struct ethercom *ec = &sc->sc_ethercom;
702 struct ether_multi *enm;
703 struct ether_multistep estep;
704 int i;
705 u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;
706
707 if (ifp->if_flags & IFF_PROMISC)
708 mask |= FIL_PROMISC;
709
710 if (!(ifp->if_flags & IFF_MULTICAST))
711 goto out;
712
713 if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
714 mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
715 } else {
716 ETHER_FIRST_MULTI(estep, ec, enm);
717 while (enm != NULL) {
718 if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
719 ETHER_ADDR_LEN) != 0)
720 goto out;
721 i = ex_mchash(enm->enm_addrlo);
722 bus_space_write_2(sc->sc_iot, sc->sc_ioh,
723 ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
724 ETHER_NEXT_MULTI(estep, enm);
725 }
726 mask |= FIL_MULTIHASH;
727 }
728 out:
729 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
730 SET_RX_FILTER | mask);
731 }
732
733
	/*
	 * Service transmit-complete status: drain TX_STATUS and recover
	 * from jabber, FIFO underrun and excessive-collision conditions.
	 */
734 static void
735 ex_txstat(sc)
736 struct ex_softc *sc;
737 {
738 bus_space_tag_t iot = sc->sc_iot;
739 bus_space_handle_t ioh = sc->sc_ioh;
740 int i;
741
742 /*
743 * We need to read+write TX_STATUS until we get a 0 status
744 * in order to turn off the interrupt flag.
745 */
746 while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
	/* The write pops the current status entry and clears the flag. */
747 bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);
748
749 if (i & TXS_JABBER) {
750 ++sc->sc_ethercom.ec_if.if_oerrors;
751 if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
752 printf("%s: jabber (%x)\n",
753 sc->sc_dev.dv_xname, i);
	/* Jabber: re-initialize the whole chip. */
754 ex_init(sc);
755 /* TODO: be more subtle here */
756 } else if (i & TXS_UNDERRUN) {
757 ++sc->sc_ethercom.ec_if.if_oerrors;
758 if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
759 printf("%s: fifo underrun (%x) @%d\n",
760 sc->sc_dev.dv_xname, i,
761 sc->tx_start_thresh);
	/*
	 * Underrun: raise the transmit start threshold by 20 (capped
	 * at ETHER_MAX_LEN) unless we have already seen 100 good
	 * transmissions in a row, then re-initialize.
	 */
762 if (sc->tx_succ_ok < 100)
763 sc->tx_start_thresh = min(ETHER_MAX_LEN,
764 sc->tx_start_thresh + 20);
765 sc->tx_succ_ok = 0;
766 ex_init(sc);
767 /* TODO: be more subtle here */
768 } else if (i & TXS_MAX_COLLISION) {
769 ++sc->sc_ethercom.ec_if.if_collisions;
	/* Max collisions disables the transmitter; re-enable it. */
770 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
771 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
772 } else
	/* Successful completion: count it, modulo 128. */
773 sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
774 }
775 }
776
777 int
778 ex_media_chg(ifp)
779 struct ifnet *ifp;
780 {
781 struct ex_softc *sc = ifp->if_softc;
782
783 if (ifp->if_flags & IFF_UP)
784 ex_init(sc);
785 return 0;
786 }
787
	/*
	 * Program the chip for the currently selected medium: set
	 * full/half duplex, then either hand off to the PHY (MII) or
	 * start the appropriate native transceiver.
	 * NOTE(review): the MAC_CONTROL write below assumes window 3 is
	 * already selected by the caller (true for ex_init) -- confirm.
	 */
788 void
789 ex_set_media(sc)
790 struct ex_softc *sc;
791 {
792 bus_space_tag_t iot = sc->sc_iot;
793 bus_space_handle_t ioh = sc->sc_ioh;
794 int config0, config1;
795
	/* Enable full duplex in the MAC if the active/selected media says so. */
796 if (((sc->ex_conf & EX_CONF_MII) &&
797 (sc->ex_mii.mii_media_active & IFM_FDX))
798 || (!(sc->ex_conf & EX_CONF_MII) &&
799 (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
800 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
801 MAC_CONTROL_FDX);
802 } else {
803 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
804 }
805
806 /*
807 * If the device has MII, select it, and then tell the
808 * PHY which media to use.
809 */
810 if (sc->ex_conf & EX_CONF_MII) {
811 GO_WINDOW(3);
812
	/* Read both halves of the internal config register. */
813 config0 = (u_int)bus_space_read_2(iot, ioh,
814 ELINK_W3_INTERNAL_CONFIG);
815 config1 = (u_int)bus_space_read_2(iot, ioh,
816 ELINK_W3_INTERNAL_CONFIG + 2);
817
	/* Force the transceiver selection to MII. */
818 config1 = config1 & ~CONFIG_MEDIAMASK;
819 config1 |= (ELINKMEDIA_MII << CONFIG_MEDIAMASK_SHIFT);
820
	/* config0 is written back unchanged. */
821 bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
822 bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
823 mii_mediachg(&sc->ex_mii);
824 return;
825 }
826
	/* Non-MII: stop the current transceiver before switching media. */
827 GO_WINDOW(4);
828 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
829 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
830 delay(800);
831
832 /*
833 * Now turn on the selected media/transceiver.
834 */
835 switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
836 case IFM_10_T:
837 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
838 JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
839 break;
840
841 case IFM_10_2:
842 bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
843 DELAY(800);
844 break;
845
846 case IFM_100_TX:
847 case IFM_100_FX:
848 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
849 LINKBEAT_ENABLE);
850 DELAY(800);
851 break;
852
853 case IFM_10_5:
854 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
855 SQE_ENABLE);
856 DELAY(800);
857 break;
858
859 case IFM_MANUAL:
860 break;
861
862 case IFM_NONE:
863 return;
864
865 default:
866 panic("ex_set_media: impossible");
867 }
868
	/* Finally select the transceiver in the internal config register;
	 * ifm_data holds the ELINKMEDIA_* code set up in ex_probemedia. */
869 GO_WINDOW(3);
870 config0 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
871 config1 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
872
873 config1 = config1 & ~CONFIG_MEDIAMASK;
874 config1 |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
875 CONFIG_MEDIAMASK_SHIFT);
876
877 bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
878 bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
879 }
880
881 /*
882 * Get currently-selected media from card.
883 * (if_media callback, may be called before interface is brought up).
884 */
885 void
886 ex_media_stat(ifp, req)
887 struct ifnet *ifp;
888 struct ifmediareq *req;
889 {
890 struct ex_softc *sc = ifp->if_softc;
891
892 if (sc->ex_conf & EX_CONF_MII) {
893 mii_pollstat(&sc->ex_mii);
894 req->ifm_status = sc->ex_mii.mii_media_status;
895 req->ifm_active = sc->ex_mii.mii_media_active;
896 } else {
897 GO_WINDOW(4);
898 req->ifm_status = IFM_AVALID;
899 req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
900 if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
901 ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
902 req->ifm_status |= IFM_ACTIVE;
903 GO_WINDOW(1);
904 }
905 }
906
907
908
909 /*
910 * Start outputting on the interface.
911 */
	/*
	 * Queue packets from the interface send queue onto the download
	 * (tx) descriptor chain and kick the download engine.  Bails out
	 * immediately if a chain is already in flight (tx_head != NULL)
	 * or no free descriptors remain.
	 */
912 static void
913 ex_start(ifp)
914 struct ifnet *ifp;
915 {
916 struct ex_softc *sc = ifp->if_softc;
917 bus_space_tag_t iot = sc->sc_iot;
918 bus_space_handle_t ioh = sc->sc_ioh;
919 volatile struct ex_fraghdr *fr = NULL;
920 volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
921 struct ex_txdesc *txp;
922 bus_dmamap_t dmamap;
923 int offset, totlen;
924
925 if (sc->tx_head || sc->tx_free == NULL)
926 return;
927
928 txp = NULL;
929
930 /*
931 * We're finished if there is nothing more to add to the list or if
932 * we're all filled up with buffers to transmit.
933 */
934 while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
935 struct mbuf *mb_head;
936 int segment, error;
937
938 /*
939 * Grab a packet to transmit.
940 */
941 IF_DEQUEUE(&ifp->if_snd, mb_head);
942
943 /*
944 * Get pointer to next available tx desc.
945 */
946 txp = sc->tx_free;
947 sc->tx_free = txp->tx_next;
948 txp->tx_next = NULL;
949 dmamap = txp->tx_dmamap;
950
951 /*
952 * Go through each of the mbufs in the chain and initialize
953 * the transmit buffer descriptors with the physical address
954 * and size of the mbuf.
955 */
956 reload:
957 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
958 mb_head, BUS_DMA_NOWAIT);
959 switch (error) {
960 case 0:
961 /* Success. */
962 break;
963
964 case EFBIG:
965 {
966 struct mbuf *mn;
967
968 /*
969 * We ran out of segments. We have to recopy this
970 * mbuf chain first. Bail out if we can't get the
971 * new buffers.
972 */
973 printf("%s: too many segments, ", sc->sc_dev.dv_xname);
974
975 MGETHDR(mn, M_DONTWAIT, MT_DATA);
976 if (mn == NULL) {
977 m_freem(mb_head);
978 printf("aborting\n");
979 goto out;
980 }
	/* Need a cluster if the packet doesn't fit in a plain mbuf. */
981 if (mb_head->m_pkthdr.len > MHLEN) {
982 MCLGET(mn, M_DONTWAIT);
983 if ((mn->m_flags & M_EXT) == 0) {
984 m_freem(mn);
985 m_freem(mb_head);
986 printf("aborting\n");
987 goto out;
988 }
989 }
	/* Linearize the chain into the new single mbuf and retry. */
990 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
991 mtod(mn, caddr_t));
992 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
993 m_freem(mb_head);
994 mb_head = mn;
995 printf("retrying\n");
996 goto reload;
997 }
998
999 default:
1000 /*
1001 * Some other problem; report it.
1002 */
1003 printf("%s: can't load mbuf chain, error = %d\n",
1004 sc->sc_dev.dv_xname, error);
1005 m_freem(mb_head);
1006 goto out;
1007 }
1008
	/* Fill one fragment header per DMA segment (PCI byte order). */
1009 fr = &txp->tx_dpd->dpd_frags[0];
1010 totlen = 0;
1011 for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
1012 fr->fr_addr = htopci(dmamap->dm_segs[segment].ds_addr);
1013 fr->fr_len = htopci(dmamap->dm_segs[segment].ds_len);
1014 totlen += dmamap->dm_segs[segment].ds_len;
1015 }
	/* Mark the last fragment so the chip knows where the packet ends. */
1016 fr--;
1017 fr->fr_len |= htopci(EX_FR_LAST);
1018 txp->tx_mbhead = mb_head;
1019
1020 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1021 BUS_DMASYNC_PREWRITE);
1022
1023 dpd = txp->tx_dpd;
1024 dpd->dpd_nextptr = 0;
1025 dpd->dpd_fsh = htopci(totlen);
1026
1027 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1028 ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
1029 sizeof (struct ex_dpd),
1030 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1031
1032 /*
1033 * No need to stall the download engine, we know it's
1034 * not busy right now.
1035 *
1036 * Fix up pointers in both the "soft" tx and the physical
1037 * tx list.
1038 */
1039 if (sc->tx_head != NULL) {
	/* Link the new DPD after the current tail (sync around the write). */
1040 prevdpd = sc->tx_tail->tx_dpd;
1041 offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
1042 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1043 offset, sizeof (struct ex_dpd),
1044 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1045 prevdpd->dpd_nextptr = htopci(DPD_DMADDR(sc, txp));
1046 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1047 offset, sizeof (struct ex_dpd),
1048 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1049 sc->tx_tail->tx_next = txp;
1050 sc->tx_tail = txp;
1051 } else {
1052 sc->tx_tail = sc->tx_head = txp;
1053 }
1054
1055 #if NBPFILTER > 0
1056 /*
1057 * Pass packet to bpf if there is a listener.
1058 */
1059 if (ifp->if_bpf)
1060 bpf_mtap(ifp->if_bpf, mb_head);
1061 #endif
1062 }
1063 out:
1064 if (sc->tx_head) {
	/* Ask for a download-complete interrupt on the last DPD, hand
	 * the chain to the chip and arm the watchdog. */
1065 sc->tx_tail->tx_dpd->dpd_fsh |= htopci(EX_DPD_DNIND);
1066 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1067 ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
1068 sizeof (struct ex_dpd),
1069 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1070 ifp->if_flags |= IFF_OACTIVE;
1071 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
1072 bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
1073 DPD_DMADDR(sc, sc->tx_head));
1074
1075 /* trigger watchdog */
1076 ifp->if_timer = 5;
1077 }
1078 }
1079
1080
/*
 * Interrupt service routine.  Loops acknowledging and dispatching all
 * pending interrupt causes until the status register shows none left:
 * host errors (reset + reinit), TX completion, statistics overflow,
 * download (TX DMA) completion and upload (RX DMA) completion.
 * Returns nonzero iff this device raised an interrupt, for shared
 * interrupt lines.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore stray interrupts while the device is disabled. */
	if (sc->enabled == 0) {
		return ret;
	}
	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);
		if (!(stat & S_MASK))
			break;
		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		/* Bus-specific acknowledge hook (set by the attachment). */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);
		ret = 1;
		if (stat & S_HOST_ERROR) {
			/*
			 * Adapter failure; a full reset and
			 * reinitialization is the only recovery.
			 */
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			bus_space_write_2(iot, ioh, ELINK_COMMAND,
			    C_INTR_LATCH);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			/* Per-packet TX status available; let it be read. */
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			/* A statistics counter is near overflow; harvest. */
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/*
			 * The download engine finished the whole chain:
			 * unload and free every transmitted mbuf.
			 */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				/* ptxp ends up as the last (tail) element. */
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

			/* Process received packets until one is incomplete. */
 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;
			pktstat = pcitoh(upd->upd_pktstatus);

			/* Sync buffer and descriptor before reading them. */
			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;


					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Drop runts shorter than a header. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				/* NOTE(review): 0x2000 presumably is the
				 * "upload stalled" bit -- confirm vs docs. */
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}
	if (ret) {
		/* Clear the latch and restart output if work is queued. */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);
		if (ifp->if_snd.ifq_head != NULL)
			ex_start(ifp);
	}
	return ret;
}
1265
/*
 * Handle interface ioctls: address assignment, media selection,
 * up/down/flag changes and multicast list updates.  Runs at splnet
 * to keep the interrupt handler out while state is changed.
 */
int
ex_ioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		/* Setting an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS either adopts the hardware address or
			 * overrides it with the protocol address.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media get/set handled by the common ifmedia layer. */
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
1365
/*
 * Harvest the hardware statistics counters (windows 6 and 4) into the
 * interface counters.  The registers are read here even when the value
 * is discarded, because reading clears them and thereby prevents
 * further statistics-overflow interrupts.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* UPPER_FRAMES_OK holds the high bits of both packet counters. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occured at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/* High nibbles of the 20-bit byte counters live in window 4. */
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}
1406
1407 void
1408 ex_printstats(sc)
1409 struct ex_softc *sc;
1410 {
1411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1412
1413 ex_getstats(sc);
1414 printf("in %ld out %ld ierror %ld oerror %ld ibytes %ld obytes %ld\n",
1415 ifp->if_ipackets, ifp->if_opackets, ifp->if_ierrors,
1416 ifp->if_oerrors, ifp->if_ibytes, ifp->if_obytes);
1417 }
1418
/*
 * Once-per-second timer: tick the MII autonegotiation state machine
 * (if the board has an MII) and harvest statistics, then reschedule
 * ourselves.  Runs at splnet to exclude the interrupt handler.
 */
void
ex_tick(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	int s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	/* Skip the stats harvest while the chip is busy with a command. */
	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & S_COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	timeout(ex_tick, sc, hz);
}
1437
1438
/*
 * Issue a global reset to the chip.  The 400us delay gives the card
 * time to settle before ex_waitcmd() polls for command completion.
 */
void
ex_reset(sc)
	struct ex_softc *sc;
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
	delay(400);
	ex_waitcmd(sc);
}
1447
1448 void
1449 ex_watchdog(ifp)
1450 struct ifnet *ifp;
1451 {
1452 struct ex_softc *sc = ifp->if_softc;
1453
1454 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1455 ++sc->sc_ethercom.ec_if.if_oerrors;
1456
1457 ex_reset(sc);
1458 ex_init(sc);
1459 }
1460
/*
 * Stop the interface: disable receiver, transmitter and transceiver,
 * free all mbufs queued for transmission and all pending receive
 * buffers, rebuild the descriptor lists, and cancel the tick timer.
 */
void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Release every mbuf still on the active transmit list. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		/* Clear the DPD so the chip sees an empty descriptor. */
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain, recycling or replacing each mbuf. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	untimeout(ex_tick, sc);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1511
1512 static void
1513 ex_init_txdescs(sc)
1514 struct ex_softc *sc;
1515 {
1516 int i;
1517
1518 for (i = 0; i < EX_NDPD; i++) {
1519 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1520 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1521 if (i < EX_NDPD - 1)
1522 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1523 else
1524 sc->sc_txdescs[i].tx_next = NULL;
1525 }
1526 sc->tx_free = &sc->sc_txdescs[0];
1527 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1528 }
1529
1530
1531 /*
1532 * Before reboots, reset card completely.
1533 */
static void
ex_shutdown(arg)
	void *arg;
{

	/* Quiescing the chip stops all DMA before the kernel reboots. */
	ex_stop((struct ex_softc *)arg);
}
1542
1543 /*
1544 * Read EEPROM data.
1545 * XXX what to do if EEPROM doesn't unbusy?
1546 */
u_int16_t
ex_read_eeprom(sc, offset)
	struct ex_softc *sc;
	int offset;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t data = 0;	/* returned unchanged (0) on timeout */

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	/* The EEPROM command encoding differs per bus attachment. */
	switch (sc->ex_bustype) {
	case EX_BUS_PCI:
		bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    READ_EEPROM | (offset & 0x3f));
		break;
	case EX_BUS_CARDBUS:
		/*
		 * NOTE(review): 0x230 presumably encodes the CardBus
		 * read command; confirm against 3Com documentation.
		 */
		bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    0x230 + (offset & 0x3f));
		break;
	}
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
out:
	return data;
}
1575
1576 static int
1577 ex_eeprom_busy(sc)
1578 struct ex_softc *sc;
1579 {
1580 bus_space_tag_t iot = sc->sc_iot;
1581 bus_space_handle_t ioh = sc->sc_ioh;
1582 int i = 100;
1583
1584 while (i--) {
1585 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1586 EEPROM_BUSY))
1587 return 0;
1588 delay(100);
1589 }
1590 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1591 return (1);
1592 }
1593
1594 /*
1595 * Create a new rx buffer and add it to the 'soft' rx list.
1596 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;	/* rval 1 = old buffer was recycled */

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	/*
	 * Try to allocate a fresh cluster mbuf.  On any failure, fall
	 * back to recycling the old buffer; if there is no old buffer
	 * either, report failure (return 1).
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			/* Undo the +2 data offset applied last time. */
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 * (A recycled mbuf's map is still loaded and is reused as-is.)
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf"); /* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	/*
	 * NOTE(review): the buffer length appears to be written into the
	 * packet-status field before handing the UPD to the chip; the
	 * fragment address is likewise offset by 2 to match m_data.
	 */
	rxd->rx_upd->upd_pktstatus = htopci(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htopci(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		/* Chain the previous tail's UPD to this one for the chip. */
		sc->rx_tail->rx_upd->upd_nextptr = htopci(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Make the buffer and its descriptor visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1677
1678 void
1679 ex_mii_setbit(v, bit)
1680 void *v;
1681 u_int16_t bit;
1682 {
1683 struct ex_softc *sc = v;
1684 u_int16_t val;
1685
1686 val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
1687 val |= bit;
1688 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1689 }
1690
1691 void
1692 ex_mii_clrbit(v, bit)
1693 void *v;
1694 u_int16_t bit;
1695 {
1696 struct ex_softc *sc = v;
1697 u_int16_t val;
1698
1699 val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
1700 val &= ~bit;
1701 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1702 }
1703
1704 u_int16_t
1705 ex_mii_readbit(v, bit)
1706 void *v;
1707 u_int16_t bit;
1708 {
1709 struct ex_softc *sc = v;
1710 u_int16_t val;
1711
1712 val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
1713 return (val & bit);
1714 }
1715
1716 /*
1717 * The reason why all this stuff below is here, is that we need a special
1718 * readreg function. It needs to check if we're accessing the internal
1719 * PHY on 905B-TX boards, or not. If so, the read must fail immediately,
1720 * because 905B-TX boards seem to return garbage from the MII if you
1721 * try to access non-existing PHYs.
1722 */
1723
/*
 * Bit-bang an MII management read frame: 32-bit preamble of ones,
 * start + read opcodes, PHY and register addresses, turnaround, then
 * 16 data bits clocked in from the PHY.  Returns 0 if the PHY did not
 * respond (or if a non-internal PHY is addressed on a board with the
 * internal-PHY quirk -- see the comment above this function).
 */
int
ex_mii_readreg(v, phy, reg)
	struct device *v;
	int phy;
	int reg;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int val = 0;
	int err =0;
	int i;

	/* 905B-TX boards return garbage for PHYs other than the
	 * internal one; fail those reads immediately. */
	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, 0);

	/* Drive MDIO as output, data high, then clock out the
	 * 32-bit preamble of ones. */
	ex_mii_setbit(sc, ELINK_PHY_DIR);
	delay(1);
	ex_mii_setbit(sc, ELINK_PHY_DIR|ELINK_PHY_DATA);
	delay(1);

	for (i = 0; i < 32; i++) {
		ex_mii_setbit(sc, ELINK_PHY_CLK);
		delay(1);
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		delay(1);
	}
	/* Start-of-frame, read opcode, PHY address, register address. */
	ex_mii_writebits(sc, MII_COMMAND_START, 2);
	ex_mii_writebits(sc, MII_COMMAND_READ, 2);
	ex_mii_writebits(sc, phy, 5);
	ex_mii_writebits(sc, reg, 5);

	/* Turnaround: release the data line to the PHY. */
	ex_mii_clrbit(sc, ELINK_PHY_DATA|ELINK_PHY_CLK);
	delay(1);
	ex_mii_setbit(sc, ELINK_PHY_CLK);
	delay(1);
	ex_mii_clrbit(sc, ELINK_PHY_DIR|ELINK_PHY_CLK);
	delay(1);
	ex_mii_setbit(sc, ELINK_PHY_CLK);
	delay(1);

	/* A responding PHY drives the data line low here; high = error. */
	err = ex_mii_readbit(sc, ELINK_PHY_DATA);

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0; i < 16; i++) {
		val <<= 1;
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		delay(1);
		if (err == 0 && ex_mii_readbit(sc, ELINK_PHY_DATA))
			val |= 1;
		ex_mii_setbit(sc, ELINK_PHY_CLK);
		delay(1);
	}
	/* One idle clock to finish the frame. */
	ex_mii_clrbit(sc, ELINK_PHY_CLK);
	delay(1);
	ex_mii_setbit(sc, ELINK_PHY_CLK);
	delay(1);

	GO_WINDOW(1);

	return (err ? 0 : val);
}
1787
/*
 * Clock out the `nbits' low-order bits of `data' onto the MII
 * management interface, most significant bit first.
 */
static void
ex_mii_writebits(sc, data, nbits)
	struct ex_softc *sc;
	unsigned int data;
	int nbits;
{
	int i;

	/* Drive the data line (output direction), clock low. */
	ex_mii_setbit(sc, ELINK_PHY_DIR);
	ex_mii_clrbit(sc, ELINK_PHY_CLK);

	/* i walks a one-bit mask from the MSB down to bit 0. */
	for (i = 1 << (nbits -1); i; i = i >> 1) {
		if (data & i)
			ex_mii_setbit(sc, ELINK_PHY_DATA);
		else
			ex_mii_clrbit(sc, ELINK_PHY_DATA);
		delay(1);
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		delay(1);
		/* Rising clock edge latches the bit into the PHY. */
		ex_mii_setbit(sc, ELINK_PHY_CLK);
	}
}
1810
/*
 * Bit-bang an MII management write frame: 32-bit preamble of ones,
 * start + write opcodes, PHY and register addresses, turnaround bits,
 * then the 16 data bits.
 */
void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int i;

	GO_WINDOW(4);

	/* Drive MDIO as output, data high, then clock out the
	 * 32-bit preamble of ones. */
	ex_mii_setbit(sc, ELINK_PHY_DIR);
	delay(1);
	ex_mii_setbit(sc, ELINK_PHY_DIR|ELINK_PHY_DATA);
	delay(1);
	for (i = 0; i < 32; i++) {
		ex_mii_setbit(sc, ELINK_PHY_CLK);
		delay(1);
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		delay(1);
	}
	/* Frame: start, write opcode, PHY addr, reg addr, turnaround,
	 * then the register data. */
	ex_mii_writebits(sc, MII_COMMAND_START, 2);
	ex_mii_writebits(sc, MII_COMMAND_WRITE, 2);
	ex_mii_writebits(sc, phy, 5);
	ex_mii_writebits(sc, reg, 5);
	ex_mii_writebits(sc, MII_COMMAND_ACK, 2);
	ex_mii_writebits(sc, data, 16);

	/* Final idle clock, then release the data line. */
	ex_mii_setbit(sc, ELINK_PHY_CLK);
	delay(1);
	ex_mii_clrbit(sc, ELINK_PHY_CLK);
	delay(1);
	ex_mii_clrbit(sc, ELINK_PHY_DIR);

	GO_WINDOW(1);
}
1848
1849 void
1850 ex_mii_statchg(v)
1851 struct device *v;
1852 {
1853 struct ex_softc *sc = (struct ex_softc *)v;
1854 bus_space_tag_t iot = sc->sc_iot;
1855 bus_space_handle_t ioh = sc->sc_ioh;
1856 int mctl;
1857
1858 /* XXX Update ifp->if_baudrate */
1859
1860 GO_WINDOW(3);
1861 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1862 if (sc->ex_mii.mii_media_active & IFM_FDX)
1863 mctl |= MAC_CONTROL_FDX;
1864 else
1865 mctl &= ~MAC_CONTROL_FDX;
1866 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1867 GO_WINDOW(1); /* back to operating window */
1868 }
1869