/*	$NetBSD: elinkxl.c,v 1.34.2.6 2002/01/01 12:07:38 he Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif

/* ifmedia callbacks */
int ex_media_chg __P((struct ifnet *ifp));
void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));

void ex_probe_media __P((struct ex_softc *));
void ex_set_filter __P((struct ex_softc *));
void ex_set_media __P((struct ex_softc *));
struct mbuf *ex_get __P((struct ex_softc *, int));
u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
void ex_init __P((struct ex_softc *));
void ex_read __P((struct ex_softc *));
void ex_reset __P((struct ex_softc *));
void ex_set_mc __P((struct ex_softc *));
void ex_getstats __P((struct ex_softc *));
void ex_printstats __P((struct ex_softc *));
void ex_tick __P((void *));

int ex_enable __P((struct ex_softc *));
void ex_disable __P((struct ex_softc *));
void ex_power __P((int, void *));

static int ex_eeprom_busy __P((struct ex_softc *));
static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
static void ex_init_txdescs __P((struct ex_softc *));

static void ex_shutdown __P((void *));
static void ex_start __P((struct ifnet *));
static void ex_txstat __P((struct ex_softc *));

int ex_mii_readreg __P((struct device *, int, int));
void ex_mii_writereg __P((struct device *, int, int, int));
void ex_mii_statchg __P((struct device *));

void ex_probemedia __P((struct ex_softc *));

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT", IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5", IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2", IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX", IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX", IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX", IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII, "manual", IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4", IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL, 0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
u_int32_t ex_mii_bitbang_read __P((struct device *));
void ex_mii_bitbang_write __P((struct device *, u_int32_t));

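/*
 * Note that MII_BIT_MDO and MII_BIT_MDI map to the same register bit:
 * the PHY management interface multiplexes data in and out on
 * ELINK_PHY_DATA, with ELINK_PHY_DIR selecting the direction.
 */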
const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Back-end attach and configure.
 */
void
ex_config(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp;
	u_int16_t val;
	u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	callout_init(&sc->ex_mii_callout);

	ex_reset(sc);

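	/*
	 * Read the station address from the EEPROM; it is stored as
	 * three 16-bit words, high byte first.
	 */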
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

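	/*
	 * attach_stage records how far we get; the fail path below
	 * unwinds resources in reverse order based on it.
	 */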
	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		printf("%s: can't create upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}
	bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), 1,
	    EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		printf("%s: can't create download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 8;


	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			printf("%s: can't create tx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			printf("%s: can't create rx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once.  The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			printf("%s: can't allocate or map rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;


	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		u_int32_t icfg;

		GO_WINDOW(3);
		icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
		icfg &= ~(CONFIG_XCVR_SEL << 16);
		if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
			icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
		if (val & ELINK_MEDIACAP_100BASETX)
			icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
		if (val & ELINK_MEDIACAP_100BASEFX)
			icfg |= ELINKMEDIA_100BASE_FX
			    << (CONFIG_XCVR_SEL_SHIFT + 16);
		bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);

		mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/* Establish callback to reset card when we reboot. */
	sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	/* Add a suspend hook to make sure we come back up after a resume. */
	sc->sc_powerhook = powerhook_establish(ex_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    sc->sc_dev.dv_xname);

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */
	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		break;

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define	PRINT(s)	printf("%s%s", sep, s); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Bring device up.
 */
void
ex_init(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s, i;

	s = splnet();

	ex_waitcmd(sc);
	ex_stop(sc);

	/*
	 * Set the station address and clear the station mask.  The latter
	 * is needed for 90x cards; 0 is the default for 90xB cards.
	 */
	GO_WINDOW(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		u_int16_t cbcard_config;

		GO_WINDOW(2);
		cbcard_config = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 0x0c);
		if (sc->ex_conf & EX_CONF_PHY_POWER) {
			cbcard_config |= 0x4000; /* turn on PHY power */
		}
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) {
			cbcard_config |= 0x0010; /* invert LED polarity */
		}
		bus_space_write_2(sc->sc_iot, sc->sc_ioh, 0x0c, cbcard_config);

		GO_WINDOW(3);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);

	GO_WINDOW(1);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

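/*
 * The 90xB's multicast hash filter is indexed by the low 8 bits of
 * the big-endian CRC-32 of the station address; ex_set_mc() below
 * sets one filter bit per multicast address.
 */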
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & 0xff)

/*
 * Set multicast receive filter.  Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}


static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			++sc->sc_ethercom.ec_if.if_collisions;
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
	}
}

int
ex_media_chg(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		ex_init(sc);
	return 0;
}

void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

		configreg &= ~(CONFIG_MEDIAMASK << 16);
		configreg |= (ELINKMEDIA_MII << (CONFIG_MEDIAMASK_SHIFT + 16));

		bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}



/*
 * Start outputting on the interface.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
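		/*
		 * Request a download-complete indication for the last
		 * DPD queued, then unstall the download engine and
		 * point it at the head of the chain.
		 */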
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

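	/*
	 * Loop until the status register shows no more interrupt
	 * causes; the interrupt latch is cleared at the top of each
	 * pass so that new events re-assert it.
	 */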
	for (;;) {
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & S_HOST_ERROR) {
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

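					/*
					 * When VLAN-sized frames are
					 * enabled, check the error mask
					 * that presumably tolerates the
					 * larger frames.
					 */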
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled.  We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}

int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		if ((error = ex_enable(sc)) != 0)
			break;
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			ex_disable(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			if ((error = ex_enable(sc)) != 0)
				break;
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			if ((error = ex_enable(sc)) != 0)
				break;
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

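	/*
	 * UPPER_FRAMES_OK holds the upper bits of the good-frame
	 * counters: bits 0-1 extend RX_FRAMES_OK, bits 4-5 extend
	 * TX_FRAMES_OK.
	 */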
	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of
	 * collisions; this is the number that occurred at the very
	 * least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}

void
ex_printstats(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	int s;

	if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

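	/*
	 * Only harvest statistics while the chip is idle; reading
	 * them involves issuing window-select commands.
	 */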
	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & S_COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(sc)
	struct ex_softc *sc;
{
	u_int16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long.  Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(sc);
}

void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(sc)
	struct ex_softc *sc;
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}


int
ex_activate(self, act)
	struct device *self;
	enum devact act;
{
	struct ex_softc *sc = (void *) self;
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		if (sc->ex_conf & EX_CONF_MII)
			mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
			    MII_OFFSET_ANY);
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return (error);
}

int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static void
ex_shutdown(arg)
	void *arg;
{
	struct ex_softc *sc = arg;

	ex_stop(sc);
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
u_int16_t
ex_read_eeprom(sc, offset)
	struct ex_softc *sc;
	int offset;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t data = 0, cmd = READ_EEPROM;
	int off;

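	/*
	 * Adapters flagged EX_CONF_EEPROM_OFF keep their contents at
	 * word offset 0x30; EX_CONF_EEPROM_8BIT selects the 8-bit
	 * read command.
	 */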
	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

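	/*
	 * Try to allocate a new cluster mbuf first; if that fails,
	 * recycle the old buffer and report it through the return
	 * value.
	 */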
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Set up the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");		/* XXX */
		}
	}

	/*
	 * Align the packet payload: skipping 2 bytes puts the IP
	 * header on a 4-byte boundary after the 14-byte Ethernet
	 * header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

u_int32_t
ex_mii_bitbang_read(self)
	struct device *self;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct ex_softc *sc = (void *) self;

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(v, phy, reg)
	struct device *v;
	int phy, reg;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	struct ex_softc *sc = (struct ex_softc *)v;

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(v)
	struct device *v;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(sc)
	struct ex_softc *sc;
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			printf("%s: device enable failed\n",
			    sc->sc_dev.dv_xname);
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(sc)
	struct ex_softc *sc;
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}

void
ex_power(why, arg)
	int why;
	void *arg;
{
	struct ex_softc *sc = (void *)arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		ex_stop(sc);
		if (sc->power != NULL)
			(*sc->power)(sc, why);
		break;
	case PWR_RESUME:
		if (ifp->if_flags & IFF_UP) {
			if (sc->power != NULL)
				(*sc->power)(sc, why);
			ex_init(sc);
		}
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}