/*	$NetBSD: elinkxl.c,v 1.37 2000/08/25 09:01:59 haya Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42 #include "rnd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/kernel.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/ioctl.h>
51 #include <sys/errno.h>
52 #include <sys/syslog.h>
53 #include <sys/select.h>
54 #include <sys/device.h>
55 #if NRND > 0
56 #include <sys/rnd.h>
57 #endif
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_ether.h>
62 #include <net/if_media.h>
63
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #include <netinet/if_inarp.h>
70 #endif
71
72 #ifdef NS
73 #include <netns/ns.h>
74 #include <netns/ns_if.h>
75 #endif
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #include <net/bpfdesc.h>
80 #endif
81
82 #include <machine/cpu.h>
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85 #include <machine/endian.h>
86
87 #include <dev/mii/miivar.h>
88 #include <dev/mii/mii.h>
89 #include <dev/mii/mii_bitbang.h>
90
91 #include <dev/ic/elink3reg.h>
92 /* #include <dev/ic/elink3var.h> */
93 #include <dev/ic/elinkxlreg.h>
94 #include <dev/ic/elinkxlvar.h>
95
#ifdef DEBUG
/* Driver debug flag; non-zero enables extra diagnostics (DEBUG kernels only). */
int exdebug = 0;
#endif
99
100 /* ifmedia callbacks */
101 int ex_media_chg __P((struct ifnet *ifp));
102 void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));
103
104 void ex_probe_media __P((struct ex_softc *));
105 void ex_set_filter __P((struct ex_softc *));
106 void ex_set_media __P((struct ex_softc *));
107 struct mbuf *ex_get __P((struct ex_softc *, int));
108 u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
109 void ex_init __P((struct ex_softc *));
110 void ex_read __P((struct ex_softc *));
111 void ex_reset __P((struct ex_softc *));
112 void ex_set_mc __P((struct ex_softc *));
113 void ex_getstats __P((struct ex_softc *));
114 void ex_printstats __P((struct ex_softc *));
115 void ex_tick __P((void *));
116
117 static int ex_eeprom_busy __P((struct ex_softc *));
118 static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
119 static void ex_init_txdescs __P((struct ex_softc *));
120
121 static void ex_shutdown __P((void *));
122 static void ex_start __P((struct ifnet *));
123 static void ex_txstat __P((struct ex_softc *));
124
125 int ex_mii_readreg __P((struct device *, int, int));
126 void ex_mii_writereg __P((struct device *, int, int, int));
127 void ex_mii_statchg __P((struct device *));
128
129 void ex_probemedia __P((struct ex_softc *));
130
131 /*
132 * Structure to map media-present bits in boards to ifmedia codes and
133 * printable media names. Used for table-driven ifmedia initialization.
134 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 *
 * Scanned by ex_probemedia(); a medium is advertised when its
 * exm_mpbit is set in the W3 reset options register.  Full-duplex
 * variants share the same media-present bit as their half-duplex
 * counterparts.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	/* Sentinel: exm_name == NULL terminates the table. */
	{ 0,			NULL,		0,
	  0 },
};
168
169 /*
170 * MII bit-bang glue.
171 */
172 u_int32_t ex_mii_bitbang_read __P((struct device *));
173 void ex_mii_bitbang_write __P((struct device *, u_int32_t));
174
/*
 * Glue handed to the generic mii_bitbang code: our PHY register
 * read/write accessors plus the bit positions of the MDIO signals
 * in the ELINK PHY management register.
 */
const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
186
187 /*
188 * Back-end attach and configure.
189 */
190 void
191 ex_config(sc)
192 struct ex_softc *sc;
193 {
194 struct ifnet *ifp;
195 u_int16_t val;
196 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
197 bus_space_tag_t iot = sc->sc_iot;
198 bus_space_handle_t ioh = sc->sc_ioh;
199 int i, error, attach_stage;
200
201 callout_init(&sc->ex_mii_callout);
202
203 ex_reset(sc);
204
205 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
206 macaddr[0] = val >> 8;
207 macaddr[1] = val & 0xff;
208 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
209 macaddr[2] = val >> 8;
210 macaddr[3] = val & 0xff;
211 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
212 macaddr[4] = val >> 8;
213 macaddr[5] = val & 0xff;
214
215 printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
216 ether_sprintf(macaddr));
217
218 if (sc->intr_ack) { /* 3C575BTX specific */
219 GO_WINDOW(2);
220 bus_space_write_2(sc->sc_iot, ioh, 12, 0x10|bus_space_read_2(sc->sc_iot, ioh, 12));
221 }
222
223 attach_stage = 0;
224
225 /*
226 * Allocate the upload descriptors, and create and load the DMA
227 * map for them.
228 */
229 if ((error = bus_dmamem_alloc(sc->sc_dmat,
230 EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
231 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
232 printf("%s: can't allocate upload descriptors, error = %d\n",
233 sc->sc_dev.dv_xname, error);
234 goto fail;
235 }
236
237 attach_stage = 1;
238
239 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
240 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
241 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
242 printf("%s: can't map upload descriptors, error = %d\n",
243 sc->sc_dev.dv_xname, error);
244 goto fail;
245 }
246
247 attach_stage = 2;
248
249 if ((error = bus_dmamap_create(sc->sc_dmat,
250 EX_NUPD * sizeof (struct ex_upd), 1,
251 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
252 &sc->sc_upd_dmamap)) != 0) {
253 printf("%s: can't create upload desc. DMA map, error = %d\n",
254 sc->sc_dev.dv_xname, error);
255 goto fail;
256 }
257
258 attach_stage = 3;
259
260 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
261 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
262 BUS_DMA_NOWAIT)) != 0) {
263 printf("%s: can't load upload desc. DMA map, error = %d\n",
264 sc->sc_dev.dv_xname, error);
265 goto fail;
266 }
267
268 attach_stage = 4;
269
270 /*
271 * Allocate the download descriptors, and create and load the DMA
272 * map for them.
273 */
274 if ((error = bus_dmamem_alloc(sc->sc_dmat,
275 EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
276 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
277 printf("%s: can't allocate download descriptors, error = %d\n",
278 sc->sc_dev.dv_xname, error);
279 goto fail;
280 }
281
282 attach_stage = 5;
283
284 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
285 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
286 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
287 printf("%s: can't map download descriptors, error = %d\n",
288 sc->sc_dev.dv_xname, error);
289 goto fail;
290 }
291 bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));
292
293 attach_stage = 6;
294
295 if ((error = bus_dmamap_create(sc->sc_dmat,
296 EX_NDPD * sizeof (struct ex_dpd), 1,
297 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
298 &sc->sc_dpd_dmamap)) != 0) {
299 printf("%s: can't create download desc. DMA map, error = %d\n",
300 sc->sc_dev.dv_xname, error);
301 goto fail;
302 }
303
304 attach_stage = 7;
305
306 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
307 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
308 BUS_DMA_NOWAIT)) != 0) {
309 printf("%s: can't load download desc. DMA map, error = %d\n",
310 sc->sc_dev.dv_xname, error);
311 goto fail;
312 }
313
314 attach_stage = 8;
315
316
317 /*
318 * Create the transmit buffer DMA maps.
319 */
320 for (i = 0; i < EX_NDPD; i++) {
321 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
322 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
323 &sc->sc_tx_dmamaps[i])) != 0) {
324 printf("%s: can't create tx DMA map %d, error = %d\n",
325 sc->sc_dev.dv_xname, i, error);
326 goto fail;
327 }
328 }
329
330 attach_stage = 9;
331
332 /*
333 * Create the receive buffer DMA maps.
334 */
335 for (i = 0; i < EX_NUPD; i++) {
336 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
337 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
338 &sc->sc_rx_dmamaps[i])) != 0) {
339 printf("%s: can't create rx DMA map %d, error = %d\n",
340 sc->sc_dev.dv_xname, i, error);
341 goto fail;
342 }
343 }
344
345 attach_stage = 10;
346
347 /*
348 * Create ring of upload descriptors, only once. The DMA engine
349 * will loop over this when receiving packets, stalling if it
350 * hits an UPD with a finished receive.
351 */
352 for (i = 0; i < EX_NUPD; i++) {
353 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
354 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
355 sc->sc_upd[i].upd_frags[0].fr_len =
356 htole32((MCLBYTES - 2) | EX_FR_LAST);
357 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
358 printf("%s: can't allocate or map rx buffers\n",
359 sc->sc_dev.dv_xname);
360 goto fail;
361 }
362 }
363
364 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
365 EX_NUPD * sizeof (struct ex_upd),
366 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
367
368 ex_init_txdescs(sc);
369
370 attach_stage = 11;
371
372
373 GO_WINDOW(3);
374 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
375 if (val & ELINK_MEDIACAP_MII)
376 sc->ex_conf |= EX_CONF_MII;
377
378 ifp = &sc->sc_ethercom.ec_if;
379
380 /*
381 * Initialize our media structures and MII info. We'll
382 * probe the MII if we discover that we have one.
383 */
384 sc->ex_mii.mii_ifp = ifp;
385 sc->ex_mii.mii_readreg = ex_mii_readreg;
386 sc->ex_mii.mii_writereg = ex_mii_writereg;
387 sc->ex_mii.mii_statchg = ex_mii_statchg;
388 ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
389 ex_media_stat);
390
391 if (sc->ex_conf & EX_CONF_MII) {
392 /*
393 * Find PHY, extract media information from it.
394 * First, select the right transceiver.
395 */
396 u_int32_t icfg;
397
398 GO_WINDOW(3);
399 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
400 icfg &= ~(CONFIG_XCVR_SEL << 16);
401 if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
402 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
403 if (val & ELINK_MEDIACAP_100BASETX)
404 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
405 if (val & ELINK_MEDIACAP_100BASEFX)
406 icfg |= ELINKMEDIA_100BASE_FX
407 << (CONFIG_XCVR_SEL_SHIFT + 16);
408 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
409
410 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
411 MII_PHY_ANY, MII_OFFSET_ANY, 0);
412 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
413 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
414 0, NULL);
415 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
416 } else {
417 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
418 }
419 } else
420 ex_probemedia(sc);
421
422 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
423 ifp->if_softc = sc;
424 ifp->if_start = ex_start;
425 ifp->if_ioctl = ex_ioctl;
426 ifp->if_watchdog = ex_watchdog;
427 ifp->if_flags =
428 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
429
430 if_attach(ifp);
431 ether_ifattach(ifp, macaddr);
432
433 GO_WINDOW(1);
434
435 sc->tx_start_thresh = 20;
436 sc->tx_succ_ok = 0;
437
438 /* TODO: set queues to 0 */
439
440 #if NBPFILTER > 0
441 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
442 sizeof(struct ether_header));
443 #endif
444
445 #if NRND > 0
446 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
447 RND_TYPE_NET, 0);
448 #endif
449
450 /* Establish callback to reset card when we reboot. */
451 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
452
453 /* The attach is successful. */
454 sc->ex_flags |= EX_FLAGS_ATTACHED;
455 return;
456
457 fail:
458 /*
459 * Free any resources we've allocated during the failed attach
460 * attempt. Do this in reverse order and fall though.
461 */
462 switch (attach_stage) {
463 case 11:
464 {
465 struct ex_rxdesc *rxd;
466
467 for (i = 0; i < EX_NUPD; i++) {
468 rxd = &sc->sc_rxdescs[i];
469 if (rxd->rx_mbhead != NULL) {
470 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
471 m_freem(rxd->rx_mbhead);
472 }
473 }
474 }
475 /* FALLTHROUGH */
476
477 case 10:
478 for (i = 0; i < EX_NUPD; i++)
479 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
480 /* FALLTHROUGH */
481
482 case 9:
483 for (i = 0; i < EX_NDPD; i++)
484 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
485 /* FALLTHROUGH */
486 case 8:
487 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
488 /* FALLTHROUGH */
489
490 case 7:
491 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
492 /* FALLTHROUGH */
493
494 case 6:
495 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
496 EX_NDPD * sizeof (struct ex_dpd));
497 /* FALLTHROUGH */
498
499 case 5:
500 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
501 break;
502
503 case 4:
504 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
505 /* FALLTHROUGH */
506
507 case 3:
508 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
509 /* FALLTHROUGH */
510
511 case 2:
512 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
513 EX_NUPD * sizeof (struct ex_upd));
514 /* FALLTHROUGH */
515
516 case 1:
517 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
518 break;
519 }
520
521 }
522
523 /*
524 * Find the media present on non-MII chips.
525 */
/*
 * Find the media present on non-MII chips.
 *
 * Reads the media-present bits from the window 3 reset options
 * register, adds each present medium from ex_native_media[] to the
 * ifmedia list, and selects a default medium (the one matching the
 * EEPROM-configured default, preferring half-duplex).
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/* Media configuration lives in register window 3. */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	/* Default media selection encoded in the upper config word. */
	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define PRINT(s) printf("%s%s", sep, s); sep = ", "

	/* Add every medium whose "present" bit is set. */
	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				/* Prefer the half-duplex table entry. */
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				/* Fall back to first present medium. */
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* NOTE(review): on non-DIAGNOSTIC kernels defmedianame could
	 * still be NULL here in theory — verify all exm_epmedia values
	 * are reachable from the hardware default_media encoding. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
591
592 /*
593 * Bring device up.
594 */
/*
 * Bring device up.
 *
 * Called at splnet-protected context; programs the station address,
 * resets rx/tx engines, configures thresholds and interrupt masks,
 * selects media and the multicast filter, then enables the receiver,
 * transmitter, and upload DMA engine.
 */
void
ex_init(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s, i;

	s = splnet();

	/* Wait for any in-flight command, then quiesce the chip. */
	ex_waitcmd(sc);
	ex_stop(sc);

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	GO_WINDOW(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset receiver and transmitter, waiting for each to finish. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Disable the early-receive interrupt threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	/* Enable early-receive in the upload (rx DMA) engine. */
	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupt sources we service (S_MASK). */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);

	/* Ack any stale interrupts; intr_ack is 3C575BTX specific. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	/* Enable stats/tx/rx and hand the upload ring to the chip. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);

	GO_WINDOW(1);

	splx(s);

	/* Schedule the periodic (once per second) tick. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}
669
/* Hash filter index: low 8 bits of the big-endian CRC-32 of the address. */
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & 0xff)

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 *
 * On 90x (no hash filter) or with IFF_ALLMULTI, all multicast is
 * accepted via FIL_MULTICAST; on 90xB, individual addresses are
 * programmed into the 256-entry hash filter.
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			/*
			 * NOTE(review): an address *range* (addrlo !=
			 * addrhi) bails out without enabling any
			 * multicast reception at all; falling back to
			 * FIL_MULTICAST would seem safer — verify.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
712
713
/*
 * Drain and handle transmit status.
 *
 * Reads TX_STATUS repeatedly, writing 0 back after each read to pop
 * the status stack, until the TXS_COMPLETE bit clears.  Serious
 * errors (jabber, fifo underrun) re-initialize the chip; underruns
 * also bump the tx start threshold adaptively.
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			/*
			 * Raise the start threshold (capped at max
			 * frame length) until we get 100 consecutive
			 * good transmits.
			 */
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			/* Max collisions: re-enable tx and retry. */
			++sc->sc_ethercom.ec_if.if_collisions;
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			/* Successful transmit: count it (mod 128). */
			sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
	}
}
756
757 int
758 ex_media_chg(ifp)
759 struct ifnet *ifp;
760 {
761 struct ex_softc *sc = ifp->if_softc;
762
763 if (ifp->if_flags & IFF_UP)
764 ex_init(sc);
765 return 0;
766 }
767
/*
 * Program the chip for the currently selected media.
 *
 * Sets full/half duplex in the MAC control register, then either
 * hands control to the MII layer (MII-equipped chips) or manually
 * powers the selected native transceiver and writes the transceiver
 * select field of the internal config register.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	/*
	 * Full duplex: from the resolved MII media on MII chips,
	 * from the user-selected ifmedia word otherwise.
	 */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

		configreg &= ~(CONFIG_MEDIAMASK << 16);
		configreg |= (ELINKMEDIA_MII << (CONFIG_MEDIAMASK_SHIFT + 16));

		bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* Native media: power everything off first, then settle. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Finally, select the transceiver in the internal config. */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}
854
855 /*
856 * Get currently-selected media from card.
857 * (if_media callback, may be called before interface is brought up).
858 */
/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 *
 * MII chips: poll the PHY for status/active media.  Native chips:
 * report the configured medium and derive link state from the
 * window 4 link-beat detect bit.
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}
880
881
882
883 /*
884 * Start outputting on the interface.
885 */
/*
 * Start outputting on the interface.
 *
 * Dequeues packets from the send queue, DMA-loads each mbuf chain
 * into a free download descriptor (DPD), links the DPDs into the
 * in-flight chain, and finally kicks the download engine.  Packets
 * whose chains exceed the DMA map's segment limit are linearized
 * into a single (cluster) mbuf and retried.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

	/* Bail if a chain is already in flight or no free descriptors. */
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			/* Need a cluster if the packet exceeds MHLEN. */
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			/* Linearize the old chain into the new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * Fill the DPD fragment list from the DMA segments;
		 * mark the last fragment so the chip knows where the
		 * packet ends.
		 */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Terminate the descriptor; length goes in the header. */
		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			/* Link the previous tail DPD to this one. */
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		/* Request a down-complete interrupt on the last DPD. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
1053
1054
/*
 * Interrupt handler.  Loops, sampling and acknowledging the card's
 * status register until no interrupt cause bits remain, and services
 * each cause: adapter failure, TX completion, statistics overflow,
 * download (TX DMA) completion and upload (RX DMA) completion.
 * Returns nonzero iff the interrupt was ours.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore interrupts if the device is disabled or deactivated. */
	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

	for (;;) {
		/* Clear the interrupt latch, then sample the status. */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		/*
		 * Done when no cause bit is set and the latch has
		 * actually cleared.
		 */
		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		/* Bus-specific acknowledge hook (e.g. CardBus), if any. */
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & S_HOST_ERROR) {
			/* Catastrophic adapter error: full reset + reinit. */
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			/* Statistics counter overflow; harvest counters. */
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/*
			 * Walk the active TX list, unloading DMA maps and
			 * freeing the mbuf chains of completed packets.
			 */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				/* ptxp ends up pointing at the list tail. */
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

			/*
			 * Drain all completed UPDs from the head of the
			 * receive chain.
			 */
	rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			/* Sync buffer and descriptor before reading status. */
			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;


					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Drop runts shorter than a header. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	/* TX resources may have been freed above; restart output. */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}
1250
/*
 * Handle interface ioctls: address assignment, media selection,
 * up/down flag transitions and multicast list changes.  Runs at
 * splnet() to keep the interrupt handler out while softc state is
 * manipulated.  Returns 0 or an errno.
 */
int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS either takes its host part from the link-level
			 * address, or overrides the link-level address.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Delegate media get/set to the ifmedia framework. */
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
1350
/*
 * Harvest the hardware statistics counters into the interface
 * counters.  Reading the registers clears them on the card (which is
 * why even the unused ones are read below).  Must be called with the
 * command register idle; switches register windows and restores
 * window 1 on exit.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/*
	 * UPPER_FRAMES_OK packs extra high-order bits for the 8-bit
	 * RX (bits 1:0) and TX (bits 5:4) frame counters.
	 */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occured at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/*
	 * Upper nibbles hold high-order bits of the 16-bit byte
	 * counters read above.
	 */
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}
1391
1392 void
1393 ex_printstats(sc)
1394 struct ex_softc *sc;
1395 {
1396 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1397
1398 ex_getstats(sc);
1399 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1400 "%llu\n", (unsigned long long)ifp->if_ipackets,
1401 (unsigned long long)ifp->if_opackets,
1402 (unsigned long long)ifp->if_ierrors,
1403 (unsigned long long)ifp->if_oerrors,
1404 (unsigned long long)ifp->if_ibytes,
1405 (unsigned long long)ifp->if_obytes);
1406 }
1407
1408 void
1409 ex_tick(arg)
1410 void *arg;
1411 {
1412 struct ex_softc *sc = arg;
1413 int s;
1414
1415 if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1416 return;
1417
1418 s = splnet();
1419
1420 if (sc->ex_conf & EX_CONF_MII)
1421 mii_tick(&sc->ex_mii);
1422
1423 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1424 & S_COMMAND_IN_PROGRESS))
1425 ex_getstats(sc);
1426
1427 splx(s);
1428
1429 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1430 }
1431
1432 void
1433 ex_reset(sc)
1434 struct ex_softc *sc;
1435 {
1436 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
1437 delay(400);
1438 ex_waitcmd(sc);
1439 }
1440
1441 void
1442 ex_watchdog(ifp)
1443 struct ifnet *ifp;
1444 {
1445 struct ex_softc *sc = ifp->if_softc;
1446
1447 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1448 ++sc->sc_ethercom.ec_if.if_oerrors;
1449
1450 ex_reset(sc);
1451 ex_init(sc);
1452 }
1453
/*
 * Stop the interface: disable RX/TX on the card, free every queued
 * transmit mbuf, rebuild the TX free list and the RX buffer ring,
 * stop the tick callout and mark the interface down.  The interface
 * can be brought back with ex_init().
 */
void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the hardware before touching the software rings. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Free pending transmit packets and clear their descriptors. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain with (possibly recycled) buffers. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	/* Clear any pending interrupt latch. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1504
1505 static void
1506 ex_init_txdescs(sc)
1507 struct ex_softc *sc;
1508 {
1509 int i;
1510
1511 for (i = 0; i < EX_NDPD; i++) {
1512 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1513 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1514 if (i < EX_NDPD - 1)
1515 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1516 else
1517 sc->sc_txdescs[i].tx_next = NULL;
1518 }
1519 sc->tx_free = &sc->sc_txdescs[0];
1520 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1521 }
1522
1523
1524 int
1525 ex_activate(self, act)
1526 struct device *self;
1527 enum devact act;
1528 {
1529 struct ex_softc *sc = (void *) self;
1530 int s, error = 0;
1531
1532 s = splnet();
1533 switch (act) {
1534 case DVACT_ACTIVATE:
1535 error = EOPNOTSUPP;
1536 break;
1537
1538 case DVACT_DEACTIVATE:
1539 if (sc->ex_conf & EX_CONF_MII)
1540 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1541 MII_OFFSET_ANY);
1542 if_deactivate(&sc->sc_ethercom.ec_if);
1543 break;
1544 }
1545 splx(s);
1546
1547 return (error);
1548 }
1549
/*
 * Detach the driver instance: tear down the MII/media layer, detach
 * from bpf/ether/if layers, release all receive mbufs and DMA maps,
 * and free the DPD/UPD descriptor memory.  The teardown order mirrors
 * the reverse of attach.  Returns 0.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release any mbufs still held by the receive ring. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	/* Destroy the per-packet DMA maps. */
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	/* Unload/unmap/free the TX (DPD) descriptor memory. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	/* Unload/unmap/free the RX (UPD) descriptor memory. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);

	return (0);
}
1609
/*
 * Shutdown hook: quiesce the card completely before the system
 * reboots, so the next kernel finds it idle.
 */
static void
ex_shutdown(arg)
	void *arg;
{
	ex_stop((struct ex_softc *)arg);
}
1621
1622 /*
1623 * Read EEPROM data.
1624 * XXX what to do if EEPROM doesn't unbusy?
1625 */
1626 u_int16_t
1627 ex_read_eeprom(sc, offset)
1628 struct ex_softc *sc;
1629 int offset;
1630 {
1631 bus_space_tag_t iot = sc->sc_iot;
1632 bus_space_handle_t ioh = sc->sc_ioh;
1633 u_int16_t data = 0;
1634
1635 GO_WINDOW(0);
1636 if (ex_eeprom_busy(sc))
1637 goto out;
1638 switch (sc->ex_bustype) {
1639 case EX_BUS_PCI:
1640 bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1641 READ_EEPROM | (offset & 0x3f));
1642 break;
1643 case EX_BUS_CARDBUS:
1644 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1645 0x230 + (offset & 0x3f));
1646 break;
1647 }
1648 if (ex_eeprom_busy(sc))
1649 goto out;
1650 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1651 out:
1652 return data;
1653 }
1654
1655 static int
1656 ex_eeprom_busy(sc)
1657 struct ex_softc *sc;
1658 {
1659 bus_space_tag_t iot = sc->sc_iot;
1660 bus_space_handle_t ioh = sc->sc_ioh;
1661 int i = 100;
1662
1663 while (i--) {
1664 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1665 EEPROM_BUSY))
1666 return 0;
1667 delay(100);
1668 }
1669 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1670 return (1);
1671 }
1672
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 *
 * Tries to hang a fresh cluster mbuf on the descriptor; if allocation
 * fails the old mbuf (if any) is recycled instead.  Returns 0 when a
 * new buffer was installed, 1 when the old buffer was recycled (or
 * there was nothing to recycle) -- the caller uses this to decide
 * whether the previous packet may be passed up.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster: fall back to recycling the old mbuf. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		/* No mbuf at all: recycle the old one if we have it. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	/* Initialize the UPD: buffer length/status and single fragment. */
	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail's UPD to this one and sync it. */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Hand buffer and descriptor to the hardware. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1756
1757 u_int32_t
1758 ex_mii_bitbang_read(self)
1759 struct device *self;
1760 {
1761 struct ex_softc *sc = (void *) self;
1762
1763 /* We're already in Window 4. */
1764 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1765 }
1766
1767 void
1768 ex_mii_bitbang_write(self, val)
1769 struct device *self;
1770 u_int32_t val;
1771 {
1772 struct ex_softc *sc = (void *) self;
1773
1774 /* We're already in Window 4. */
1775 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1776 }
1777
1778 int
1779 ex_mii_readreg(v, phy, reg)
1780 struct device *v;
1781 int phy, reg;
1782 {
1783 struct ex_softc *sc = (struct ex_softc *)v;
1784 int val;
1785
1786 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1787 return 0;
1788
1789 GO_WINDOW(4);
1790
1791 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1792
1793 GO_WINDOW(1);
1794
1795 return (val);
1796 }
1797
1798 void
1799 ex_mii_writereg(v, phy, reg, data)
1800 struct device *v;
1801 int phy;
1802 int reg;
1803 int data;
1804 {
1805 struct ex_softc *sc = (struct ex_softc *)v;
1806
1807 GO_WINDOW(4);
1808
1809 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);
1810
1811 GO_WINDOW(1);
1812 }
1813
1814 void
1815 ex_mii_statchg(v)
1816 struct device *v;
1817 {
1818 struct ex_softc *sc = (struct ex_softc *)v;
1819 bus_space_tag_t iot = sc->sc_iot;
1820 bus_space_handle_t ioh = sc->sc_ioh;
1821 int mctl;
1822
1823 GO_WINDOW(3);
1824 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1825 if (sc->ex_mii.mii_media_active & IFM_FDX)
1826 mctl |= MAC_CONTROL_FDX;
1827 else
1828 mctl &= ~MAC_CONTROL_FDX;
1829 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1830 GO_WINDOW(1); /* back to operating window */
1831 }
1832