elinkxl.c revision 1.15.2.1 1 /* $NetBSD: elinkxl.c,v 1.15.2.1 2000/11/20 11:40:33 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include "opt_inet.h"
40 #include "opt_ns.h"
41 #include "bpfilter.h"
42 #include "rnd.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/callout.h>
47 #include <sys/kernel.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/ioctl.h>
51 #include <sys/errno.h>
52 #include <sys/syslog.h>
53 #include <sys/select.h>
54 #include <sys/device.h>
55 #if NRND > 0
56 #include <sys/rnd.h>
57 #endif
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_ether.h>
62 #include <net/if_media.h>
63
64 #ifdef INET
65 #include <netinet/in.h>
66 #include <netinet/in_systm.h>
67 #include <netinet/in_var.h>
68 #include <netinet/ip.h>
69 #include <netinet/if_inarp.h>
70 #endif
71
72 #ifdef NS
73 #include <netns/ns.h>
74 #include <netns/ns_if.h>
75 #endif
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #include <net/bpfdesc.h>
80 #endif
81
82 #include <machine/cpu.h>
83 #include <machine/bus.h>
84 #include <machine/intr.h>
85 #include <machine/endian.h>
86
87 #include <dev/mii/miivar.h>
88 #include <dev/mii/mii.h>
89 #include <dev/mii/mii_bitbang.h>
90
91 #include <dev/ic/elink3reg.h>
92 /* #include <dev/ic/elink3var.h> */
93 #include <dev/ic/elinkxlreg.h>
94 #include <dev/ic/elinkxlvar.h>
95
96 #ifdef DEBUG
97 int exdebug = 0;
98 #endif
99
100 /* ifmedia callbacks */
101 int ex_media_chg __P((struct ifnet *ifp));
102 void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));
103
104 void ex_probe_media __P((struct ex_softc *));
105 void ex_set_filter __P((struct ex_softc *));
106 void ex_set_media __P((struct ex_softc *));
107 struct mbuf *ex_get __P((struct ex_softc *, int));
108 u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
109 int ex_init __P((struct ifnet *));
110 void ex_read __P((struct ex_softc *));
111 void ex_reset __P((struct ex_softc *));
112 void ex_set_mc __P((struct ex_softc *));
113 void ex_getstats __P((struct ex_softc *));
114 void ex_printstats __P((struct ex_softc *));
115 void ex_tick __P((void *));
116
117 static int ex_eeprom_busy __P((struct ex_softc *));
118 static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
119 static void ex_init_txdescs __P((struct ex_softc *));
120
121 static void ex_shutdown __P((void *));
122 static void ex_start __P((struct ifnet *));
123 static void ex_txstat __P((struct ex_softc *));
124
125 int ex_mii_readreg __P((struct device *, int, int));
126 void ex_mii_writereg __P((struct device *, int, int, int));
127 void ex_mii_statchg __P((struct device *));
128
129 void ex_probemedia __P((struct ex_softc *));
130
131 /*
132 * Structure to map media-present bits in boards to ifmedia codes and
133 * printable media names. Used for table-driven ifmedia initialization.
134 */
135 struct ex_media {
136 int exm_mpbit; /* media present bit */
137 const char *exm_name; /* name of medium */
138 int exm_ifmedia; /* ifmedia word for medium */
139 int exm_epmedia; /* ELINKMEDIA_* constant */
140 };
141
142 /*
143 * Media table for 3c90x chips. Note that chips with MII have no
144 * `native' media.
145 */
/*
 * Scanned by ex_probemedia().  Entries sharing a media-present bit
 * (e.g. 10baseT and 10baseT-FDX) differ only in the ifmedia word, so
 * one hardware bit can yield several selectable media.
 */
146 struct ex_media ex_native_media[] = {
147 { ELINK_PCI_10BASE_T, "10baseT", IFM_ETHER|IFM_10_T,
148 ELINKMEDIA_10BASE_T },
149 { ELINK_PCI_10BASE_T, "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
150 ELINKMEDIA_10BASE_T },
151 { ELINK_PCI_AUI, "10base5", IFM_ETHER|IFM_10_5,
152 ELINKMEDIA_AUI },
153 { ELINK_PCI_BNC, "10base2", IFM_ETHER|IFM_10_2,
154 ELINKMEDIA_10BASE_2 },
155 { ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX,
156 ELINKMEDIA_100BASE_TX },
157 { ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
158 ELINKMEDIA_100BASE_TX },
159 { ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX,
160 ELINKMEDIA_100BASE_FX },
161 { ELINK_PCI_100BASE_MII,"manual", IFM_ETHER|IFM_MANUAL,
162 ELINKMEDIA_MII },
163 { ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4,
164 ELINKMEDIA_100BASE_T4 },
/* Table terminator: scans stop at the first NULL exm_name. */
165 { 0, NULL, 0,
166 0 },
167 };
168
169 /*
170 * MII bit-bang glue.
171 */
172 u_int32_t ex_mii_bitbang_read __P((struct device *));
173 void ex_mii_bitbang_write __P((struct device *, u_int32_t));
174
/*
 * Descriptor handed to the generic mii_bitbang(9) code: the accessors
 * above plus the bit positions in the PHY management register.  Note
 * MDO and MDI share the same data bit (ELINK_PHY_DATA); direction is
 * selected via ELINK_PHY_DIR, with no separate PHY->host bit.
 */
175 const struct mii_bitbang_ops ex_mii_bitbang_ops = {
176 ex_mii_bitbang_read,
177 ex_mii_bitbang_write,
178 {
179 ELINK_PHY_DATA, /* MII_BIT_MDO */
180 ELINK_PHY_DATA, /* MII_BIT_MDI */
181 ELINK_PHY_CLK, /* MII_BIT_MDC */
182 ELINK_PHY_DIR, /* MII_BIT_DIR_HOST_PHY */
183 0, /* MII_BIT_DIR_PHY_HOST */
184 }
185 };
186
187 /*
188 * Back-end attach and configure.
189 */
190 void
191 ex_config(sc)
192 struct ex_softc *sc;
193 {
194 struct ifnet *ifp;
195 u_int16_t val;
196 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
197 bus_space_tag_t iot = sc->sc_iot;
198 bus_space_handle_t ioh = sc->sc_ioh;
199 int i, error, attach_stage;
200
201 callout_init(&sc->ex_mii_callout);
202
203 ex_reset(sc);
204
205 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
206 macaddr[0] = val >> 8;
207 macaddr[1] = val & 0xff;
208 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
209 macaddr[2] = val >> 8;
210 macaddr[3] = val & 0xff;
211 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
212 macaddr[4] = val >> 8;
213 macaddr[5] = val & 0xff;
214
215 printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
216 ether_sprintf(macaddr));
217
218 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
219 GO_WINDOW(2);
220 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
221 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
222 val |= ELINK_RESET_OPT_LEDPOLAR;
223 if (sc->ex_conf & EX_CONF_PHY_POWER)
224 val |= ELINK_RESET_OPT_PHYPOWER;
225 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
226 }
227
228 attach_stage = 0;
229
230 /*
231 * Allocate the upload descriptors, and create and load the DMA
232 * map for them.
233 */
234 if ((error = bus_dmamem_alloc(sc->sc_dmat,
235 EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &sc->sc_useg, 1,
236 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
237 printf("%s: can't allocate upload descriptors, error = %d\n",
238 sc->sc_dev.dv_xname, error);
239 goto fail;
240 }
241
242 attach_stage = 1;
243
244 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
245 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
246 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
247 printf("%s: can't map upload descriptors, error = %d\n",
248 sc->sc_dev.dv_xname, error);
249 goto fail;
250 }
251
252 attach_stage = 2;
253
254 if ((error = bus_dmamap_create(sc->sc_dmat,
255 EX_NUPD * sizeof (struct ex_upd), 1,
256 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
257 &sc->sc_upd_dmamap)) != 0) {
258 printf("%s: can't create upload desc. DMA map, error = %d\n",
259 sc->sc_dev.dv_xname, error);
260 goto fail;
261 }
262
263 attach_stage = 3;
264
265 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
266 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
267 BUS_DMA_NOWAIT)) != 0) {
268 printf("%s: can't load upload desc. DMA map, error = %d\n",
269 sc->sc_dev.dv_xname, error);
270 goto fail;
271 }
272
273 attach_stage = 4;
274
275 /*
276 * Allocate the download descriptors, and create and load the DMA
277 * map for them.
278 */
279 if ((error = bus_dmamem_alloc(sc->sc_dmat,
280 EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &sc->sc_dseg, 1,
281 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
282 printf("%s: can't allocate download descriptors, error = %d\n",
283 sc->sc_dev.dv_xname, error);
284 goto fail;
285 }
286
287 attach_stage = 5;
288
289 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
290 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
291 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
292 printf("%s: can't map download descriptors, error = %d\n",
293 sc->sc_dev.dv_xname, error);
294 goto fail;
295 }
296 bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));
297
298 attach_stage = 6;
299
300 if ((error = bus_dmamap_create(sc->sc_dmat,
301 EX_NDPD * sizeof (struct ex_dpd), 1,
302 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
303 &sc->sc_dpd_dmamap)) != 0) {
304 printf("%s: can't create download desc. DMA map, error = %d\n",
305 sc->sc_dev.dv_xname, error);
306 goto fail;
307 }
308
309 attach_stage = 7;
310
311 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
312 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
313 BUS_DMA_NOWAIT)) != 0) {
314 printf("%s: can't load download desc. DMA map, error = %d\n",
315 sc->sc_dev.dv_xname, error);
316 goto fail;
317 }
318
319 attach_stage = 8;
320
321
322 /*
323 * Create the transmit buffer DMA maps.
324 */
325 for (i = 0; i < EX_NDPD; i++) {
326 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
327 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
328 &sc->sc_tx_dmamaps[i])) != 0) {
329 printf("%s: can't create tx DMA map %d, error = %d\n",
330 sc->sc_dev.dv_xname, i, error);
331 goto fail;
332 }
333 }
334
335 attach_stage = 9;
336
337 /*
338 * Create the receive buffer DMA maps.
339 */
340 for (i = 0; i < EX_NUPD; i++) {
341 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
342 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
343 &sc->sc_rx_dmamaps[i])) != 0) {
344 printf("%s: can't create rx DMA map %d, error = %d\n",
345 sc->sc_dev.dv_xname, i, error);
346 goto fail;
347 }
348 }
349
350 attach_stage = 10;
351
352 /*
353 * Create ring of upload descriptors, only once. The DMA engine
354 * will loop over this when receiving packets, stalling if it
355 * hits an UPD with a finished receive.
356 */
357 for (i = 0; i < EX_NUPD; i++) {
358 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
359 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
360 sc->sc_upd[i].upd_frags[0].fr_len =
361 htole32((MCLBYTES - 2) | EX_FR_LAST);
362 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
363 printf("%s: can't allocate or map rx buffers\n",
364 sc->sc_dev.dv_xname);
365 goto fail;
366 }
367 }
368
369 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
370 EX_NUPD * sizeof (struct ex_upd),
371 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
372
373 ex_init_txdescs(sc);
374
375 attach_stage = 11;
376
377
378 GO_WINDOW(3);
379 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
380 if (val & ELINK_MEDIACAP_MII)
381 sc->ex_conf |= EX_CONF_MII;
382
383 ifp = &sc->sc_ethercom.ec_if;
384
385 /*
386 * Initialize our media structures and MII info. We'll
387 * probe the MII if we discover that we have one.
388 */
389 sc->ex_mii.mii_ifp = ifp;
390 sc->ex_mii.mii_readreg = ex_mii_readreg;
391 sc->ex_mii.mii_writereg = ex_mii_writereg;
392 sc->ex_mii.mii_statchg = ex_mii_statchg;
393 ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
394 ex_media_stat);
395
396 if (sc->ex_conf & EX_CONF_MII) {
397 /*
398 * Find PHY, extract media information from it.
399 * First, select the right transceiver.
400 */
401 u_int32_t icfg;
402
403 GO_WINDOW(3);
404 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
405 icfg &= ~(CONFIG_XCVR_SEL << 16);
406 if (val & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
407 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
408 if (val & ELINK_MEDIACAP_100BASETX)
409 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
410 if (val & ELINK_MEDIACAP_100BASEFX)
411 icfg |= ELINKMEDIA_100BASE_FX
412 << (CONFIG_XCVR_SEL_SHIFT + 16);
413 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
414
415 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
416 MII_PHY_ANY, MII_OFFSET_ANY, 0);
417 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
418 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
419 0, NULL);
420 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
421 } else {
422 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
423 }
424 } else
425 ex_probemedia(sc);
426
427 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
428 ifp->if_softc = sc;
429 ifp->if_start = ex_start;
430 ifp->if_ioctl = ex_ioctl;
431 ifp->if_watchdog = ex_watchdog;
432 ifp->if_init = ex_init;
433 ifp->if_stop = ex_stop;
434 ifp->if_flags =
435 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
436
437 /*
438 * We can support 802.1Q VLAN-sized frames.
439 */
440 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
441
442 if_attach(ifp);
443 ether_ifattach(ifp, macaddr);
444
445 GO_WINDOW(1);
446
447 sc->tx_start_thresh = 20;
448 sc->tx_succ_ok = 0;
449
450 /* TODO: set queues to 0 */
451
452 #if NBPFILTER > 0
453 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
454 sizeof(struct ether_header));
455 #endif
456
457 #if NRND > 0
458 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
459 RND_TYPE_NET, 0);
460 #endif
461
462 /* Establish callback to reset card when we reboot. */
463 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
464
465 /* The attach is successful. */
466 sc->ex_flags |= EX_FLAGS_ATTACHED;
467 return;
468
469 fail:
470 /*
471 * Free any resources we've allocated during the failed attach
472 * attempt. Do this in reverse order and fall though.
473 */
474 switch (attach_stage) {
475 case 11:
476 {
477 struct ex_rxdesc *rxd;
478
479 for (i = 0; i < EX_NUPD; i++) {
480 rxd = &sc->sc_rxdescs[i];
481 if (rxd->rx_mbhead != NULL) {
482 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
483 m_freem(rxd->rx_mbhead);
484 }
485 }
486 }
487 /* FALLTHROUGH */
488
489 case 10:
490 for (i = 0; i < EX_NUPD; i++)
491 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
492 /* FALLTHROUGH */
493
494 case 9:
495 for (i = 0; i < EX_NDPD; i++)
496 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
497 /* FALLTHROUGH */
498 case 8:
499 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
500 /* FALLTHROUGH */
501
502 case 7:
503 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
504 /* FALLTHROUGH */
505
506 case 6:
507 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
508 EX_NDPD * sizeof (struct ex_dpd));
509 /* FALLTHROUGH */
510
511 case 5:
512 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
513 break;
514
515 case 4:
516 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
517 /* FALLTHROUGH */
518
519 case 3:
520 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
521 /* FALLTHROUGH */
522
523 case 2:
524 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
525 EX_NUPD * sizeof (struct ex_upd));
526 /* FALLTHROUGH */
527
528 case 1:
529 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
530 break;
531 }
532
533 }
534
535 /*
536 * Find the media present on non-MII chips.
537 */
/*
 * Reads the window-3 internal config and reset options, adds an
 * ifmedia entry for every medium whose media-present bit is set, and
 * selects a default: the EEPROM's configured medium (half-duplex
 * variant preferred), falling back to the first medium found.
 */
538 void
539 ex_probemedia(sc)
540 struct ex_softc *sc;
541 {
542 bus_space_tag_t iot = sc->sc_iot;
543 bus_space_handle_t ioh = sc->sc_ioh;
544 struct ifmedia *ifm = &sc->ex_mii.mii_media;
545 struct ex_media *exm;
546 u_int16_t config1, reset_options, default_media;
547 int defmedia = 0;
548 const char *sep = "", *defmedianame = NULL;
549
/* Media configuration lives in register window 3. */
550 GO_WINDOW(3);
551 config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
552 reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
553 GO_WINDOW(0);
554
555 default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;
556
557 printf("%s: ", sc->sc_dev.dv_xname);
558
559 /* Sanity check that there are any media! */
560 if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
561 printf("no media present!\n");
562 ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
563 ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
564 return;
565 }
566
/* Comma-separate the media names printed below. */
567 #define PRINT(s) printf("%s%s", sep, s); sep = ", "
568
569 for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
570 if (reset_options & exm->exm_mpbit) {
571 /*
572 * Default media is a little complicated. We
573 * support full-duplex which uses the same
574 * reset options bit.
575 *
576 * XXX Check EEPROM for default to FDX?
577 */
578 if (exm->exm_epmedia == default_media) {
579 if ((exm->exm_ifmedia & IFM_FDX) == 0) {
580 defmedia = exm->exm_ifmedia;
581 defmedianame = exm->exm_name;
582 }
583 } else if (defmedia == 0) {
584 defmedia = exm->exm_ifmedia;
585 defmedianame = exm->exm_name;
586 }
587 ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
588 NULL);
589 PRINT(exm->exm_name);
590 }
591 }
592
593 #undef PRINT
594
595 #ifdef DIAGNOSTIC
/* We checked at least one media bit was set, so defmedia must be set. */
596 if (defmedia == 0)
597 panic("ex_probemedia: impossible");
598 #endif
599
600 printf(", default %s\n", defmedianame);
601 ifmedia_set(ifm, defmedia);
602 }
603
604 /*
605 * Bring device up.
606 */
/*
 * if_init handler: program the station address, reset and configure
 * the RX/TX engines, enable interrupts and hand the upload (rx) ring
 * to the chip.  Runs at splnet; always returns 0.
 */
607 int
608 ex_init(ifp)
609 struct ifnet *ifp;
610 {
611 struct ex_softc *sc = ifp->if_softc;
612 bus_space_tag_t iot = sc->sc_iot;
613 bus_space_handle_t ioh = sc->sc_ioh;
614 int s, i;
615
616 s = splnet();
617
/* Let any in-flight command finish, then stop the chip before reprogramming. */
618 ex_waitcmd(sc);
619 ex_stop(ifp, 0);
620
621 /*
622 * Set the station address and clear the station mask. The latter
623 * is needed for 90x cards, 0 is the default for 90xB cards.
624 */
625 GO_WINDOW(2);
626 for (i = 0; i < ETHER_ADDR_LEN; i++) {
627 bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
628 LLADDR(ifp->if_sadl)[i]);
629 bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
630 }
631
632 GO_WINDOW(3);
633
/* Reset both DMA engines; each reset must complete before continuing. */
634 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
635 ex_waitcmd(sc);
636 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
637 ex_waitcmd(sc);
638
639 /*
640 * Disable reclaim threshold for 90xB, set free threshold to
641 * 6 * 256 = 1536 for 90x.
642 */
643 if (sc->ex_conf & EX_CONF_90XB)
644 bus_space_write_2(iot, ioh, ELINK_COMMAND,
645 ELINK_TXRECLTHRESH | 255);
646 else
647 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
648
/* Disable the rx-early threshold, but enable early-receive in DMA control. */
649 bus_space_write_2(iot, ioh, ELINK_COMMAND,
650 SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);
651
652 bus_space_write_4(iot, ioh, ELINK_DMACTRL,
653 bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);
654
/* Unmask and enable exactly the interrupt sources we handle (S_MASK). */
655 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
656 bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);
657
/* Ack anything still latched; some front-ends need an extra ack hook. */
658 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
659 if (sc->intr_ack)
660 (* sc->intr_ack)(sc);
661 ex_set_media(sc);
662 ex_set_mc(sc);
663
664
665 bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
666 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
/* Point the upload engine at the UPD ring and unstall it. */
667 bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
668 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
669 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);
670
/* CardBus-style boards: PHY power / LED polarity bits in register 0x0c. */
671 if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
672 u_int16_t cbcard_config;
673
674 GO_WINDOW(2);
675 cbcard_config = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 0x0c);
676 if (sc->ex_conf & EX_CONF_PHY_POWER) {
677 cbcard_config |= 0x4000; /* turn on PHY power */
678 }
679 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) {
680 cbcard_config |= 0x0010; /* invert LED polarity */
681 }
682 bus_space_write_2(sc->sc_iot, sc->sc_ioh, 0x0c, cbcard_config);
683
684 GO_WINDOW(3);
685 }
686
687 ifp->if_flags |= IFF_RUNNING;
688 ifp->if_flags &= ~IFF_OACTIVE;
689 ex_start(ifp);
690
691 GO_WINDOW(1);
692
693 splx(s);
694
/* Periodic (1 Hz) tick for MII/link maintenance. */
695 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
696
697 return (0);
698 }
699
700 #define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & 0xff)
701
702 /*
703 * Set multicast receive filter. Also take care of promiscuous mode
704 * here (XXX).
705 */
706 void
707 ex_set_mc(sc)
708 struct ex_softc *sc;
709 {
710 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
711 struct ethercom *ec = &sc->sc_ethercom;
712 struct ether_multi *enm;
713 struct ether_multistep estep;
714 int i;
715 u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;
716
717 if (ifp->if_flags & IFF_PROMISC)
718 mask |= FIL_PROMISC;
719
720 if (!(ifp->if_flags & IFF_MULTICAST))
721 goto out;
722
723 if (!(sc->ex_conf & EX_CONF_90XB) || ifp->if_flags & IFF_ALLMULTI) {
724 mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
725 } else {
726 ETHER_FIRST_MULTI(estep, ec, enm);
727 while (enm != NULL) {
728 if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
729 ETHER_ADDR_LEN) != 0)
730 goto out;
731 i = ex_mchash(enm->enm_addrlo);
732 bus_space_write_2(sc->sc_iot, sc->sc_ioh,
733 ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
734 ETHER_NEXT_MULTI(estep, enm);
735 }
736 mask |= FIL_MULTIHASH;
737 }
738 out:
739 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
740 SET_RX_FILTER | mask);
741 }
742
743
/*
 * Drain the per-packet TX status FIFO after a tx-complete interrupt.
 * Recovers from jabber and FIFO underrun by reinitializing the
 * interface, and restarts the transmitter after excessive collisions.
 */
744 static void
745 ex_txstat(sc)
746 struct ex_softc *sc;
747 {
748 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
749 bus_space_tag_t iot = sc->sc_iot;
750 bus_space_handle_t ioh = sc->sc_ioh;
/* `i' holds the TX_STATUS byte for the packet being examined. */
751 int i;
752
753 /*
754 * We need to read+write TX_STATUS until we get a 0 status
755 * in order to turn off the interrupt flag.
756 */
757 while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
/* The write pops this status entry and clears its interrupt. */
758 bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);
759
760 if (i & TXS_JABBER) {
761 ++sc->sc_ethercom.ec_if.if_oerrors;
762 if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
763 printf("%s: jabber (%x)\n",
764 sc->sc_dev.dv_xname, i);
765 ex_init(ifp);
766 /* TODO: be more subtle here */
767 } else if (i & TXS_UNDERRUN) {
768 ++sc->sc_ethercom.ec_if.if_oerrors;
769 if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
770 printf("%s: fifo underrun (%x) @%d\n",
771 sc->sc_dev.dv_xname, i,
772 sc->tx_start_thresh);
/* Raise the start threshold until 100 packets go out cleanly. */
773 if (sc->tx_succ_ok < 100)
774 sc->tx_start_thresh = min(ETHER_MAX_LEN,
775 sc->tx_start_thresh + 20);
776 sc->tx_succ_ok = 0;
777 ex_init(ifp);
778 /* TODO: be more subtle here */
779 } else if (i & TXS_MAX_COLLISION) {
780 ++sc->sc_ethercom.ec_if.if_collisions;
/* Transmitter disables itself on max collisions; re-enable it. */
781 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
782 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
783 } else
784 sc->tx_succ_ok = (sc->tx_succ_ok+1) & 127;
785 }
786 }
787
788 int
789 ex_media_chg(ifp)
790 struct ifnet *ifp;
791 {
792
793 if (ifp->if_flags & IFF_UP)
794 ex_init(ifp);
795 return 0;
796 }
797
/*
 * Program the MAC (duplex) and transceiver for the currently selected
 * medium.  For MII chips the transceiver is pointed at the MII and the
 * rest is delegated to mii_mediachg(); otherwise the window-4 media
 * register and internal config register are programmed directly.
 */
798 void
799 ex_set_media(sc)
800 struct ex_softc *sc;
801 {
802 bus_space_tag_t iot = sc->sc_iot;
803 bus_space_handle_t ioh = sc->sc_ioh;
804 u_int32_t configreg;
805
/* Full-duplex setting: from the PHY for MII chips, from ifmedia otherwise. */
806 if (((sc->ex_conf & EX_CONF_MII) &&
807 (sc->ex_mii.mii_media_active & IFM_FDX))
808 || (!(sc->ex_conf & EX_CONF_MII) &&
809 (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
810 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
811 MAC_CONTROL_FDX);
812 } else {
813 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
814 }
815
816 /*
817 * If the device has MII, select it, and then tell the
818 * PHY which media to use.
819 */
820 if (sc->ex_conf & EX_CONF_MII) {
821 GO_WINDOW(3);
822
823 configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
824
825 configreg &= ~(CONFIG_MEDIAMASK << 16);
826 configreg |= (ELINKMEDIA_MII << (CONFIG_MEDIAMASK_SHIFT + 16));
827
828 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
829 mii_mediachg(&sc->ex_mii);
830 return;
831 }
832
/* Non-MII: quiesce the transceiver before switching media. */
833 GO_WINDOW(4);
834 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
835 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
836 delay(800);
837
838 /*
839 * Now turn on the selected media/transceiver.
840 */
841 switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
842 case IFM_10_T:
843 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
844 JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
845 break;
846
847 case IFM_10_2:
848 bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
849 DELAY(800);
850 break;
851
852 case IFM_100_TX:
853 case IFM_100_FX:
854 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
855 LINKBEAT_ENABLE);
856 DELAY(800);
857 break;
858
859 case IFM_10_5:
860 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
861 SQE_ENABLE);
862 DELAY(800);
863 break;
864
865 case IFM_MANUAL:
866 break;
867
868 case IFM_NONE:
869 return;
870
/* Anything else was never added by ex_probemedia() and can't be selected. */
871 default:
872 panic("ex_set_media: impossible");
873 }
874
/* Finally select the transceiver in the internal configuration register. */
875 GO_WINDOW(3);
876 configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
877
878 configreg &= ~(CONFIG_MEDIAMASK << 16);
879 configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
880 (CONFIG_MEDIAMASK_SHIFT + 16));
881
882 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
883 }
884
885 /*
886 * Get currently-selected media from card.
887 * (if_media callback, may be called before interface is brought up).
888 */
889 void
890 ex_media_stat(ifp, req)
891 struct ifnet *ifp;
892 struct ifmediareq *req;
893 {
894 struct ex_softc *sc = ifp->if_softc;
895
896 if (sc->ex_conf & EX_CONF_MII) {
897 mii_pollstat(&sc->ex_mii);
898 req->ifm_status = sc->ex_mii.mii_media_status;
899 req->ifm_active = sc->ex_mii.mii_media_active;
900 } else {
901 GO_WINDOW(4);
902 req->ifm_status = IFM_AVALID;
903 req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
904 if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
905 ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
906 req->ifm_status |= IFM_ACTIVE;
907 GO_WINDOW(1);
908 }
909 }
910
911
912
913 /*
914 * Start outputting on the interface.
915 */
/*
 * if_start handler: dequeue packets, map each into a download packet
 * descriptor (DPD) chain, and hand the chain to the download engine.
 * Bails out immediately if a chain is already in flight (tx_head set);
 * the DN_COMPLETE interrupt handler reclaims it and calls us again.
 */
916 static void
917 ex_start(ifp)
918 struct ifnet *ifp;
919 {
920 struct ex_softc *sc = ifp->if_softc;
921 bus_space_tag_t iot = sc->sc_iot;
922 bus_space_handle_t ioh = sc->sc_ioh;
923 volatile struct ex_fraghdr *fr = NULL;
924 volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
925 struct ex_txdesc *txp;
926 bus_dmamap_t dmamap;
927 int offset, totlen;
928
/* A chain is already queued to the chip, or no free descriptors. */
929 if (sc->tx_head || sc->tx_free == NULL)
930 return;
931
932 txp = NULL;
933
934 /*
935 * We're finished if there is nothing more to add to the list or if
936 * we're all filled up with buffers to transmit.
937 */
938 while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
939 struct mbuf *mb_head;
940 int segment, error;
941
942 /*
943 * Grab a packet to transmit.
944 */
945 IF_DEQUEUE(&ifp->if_snd, mb_head);
946
947 /*
948 * Get pointer to next available tx desc.
949 */
950 txp = sc->tx_free;
951 sc->tx_free = txp->tx_next;
952 txp->tx_next = NULL;
953 dmamap = txp->tx_dmamap;
954
955 /*
956 * Go through each of the mbufs in the chain and initialize
957 * the transmit buffer descriptors with the physical address
958 * and size of the mbuf.
959 */
960 reload:
961 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
962 mb_head, BUS_DMA_NOWAIT);
963 switch (error) {
964 case 0:
965 /* Success. */
966 break;
967
968 case EFBIG:
969 {
970 struct mbuf *mn;
971
972 /*
973 * We ran out of segments. We have to recopy this
974 * mbuf chain first. Bail out if we can't get the
975 * new buffers.
976 */
977 printf("%s: too many segments, ", sc->sc_dev.dv_xname);
978
979 MGETHDR(mn, M_DONTWAIT, MT_DATA);
980 if (mn == NULL) {
981 m_freem(mb_head);
982 printf("aborting\n");
983 goto out;
984 }
985 if (mb_head->m_pkthdr.len > MHLEN) {
986 MCLGET(mn, M_DONTWAIT);
987 if ((mn->m_flags & M_EXT) == 0) {
988 m_freem(mn);
989 m_freem(mb_head);
990 printf("aborting\n");
991 goto out;
992 }
993 }
/* Linearize the old chain into the single new mbuf/cluster and retry. */
994 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
995 mtod(mn, caddr_t));
996 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
997 m_freem(mb_head);
998 mb_head = mn;
999 printf("retrying\n");
1000 goto reload;
1001 }
1002
1003 default:
1004 /*
1005 * Some other problem; report it.
1006 */
1007 printf("%s: can't load mbuf chain, error = %d\n",
1008 sc->sc_dev.dv_xname, error);
1009 m_freem(mb_head);
1010 goto out;
1011 }
1012
/* Fill in one fragment descriptor per DMA segment. */
1013 fr = &txp->tx_dpd->dpd_frags[0];
1014 totlen = 0;
1015 for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
1016 fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
1017 fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
1018 totlen += dmamap->dm_segs[segment].ds_len;
1019 }
/* Back up to the final fragment and flag it as the last one. */
1020 fr--;
1021 fr->fr_len |= htole32(EX_FR_LAST);
1022 txp->tx_mbhead = mb_head;
1023
1024 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1025 BUS_DMASYNC_PREWRITE);
1026
1027 dpd = txp->tx_dpd;
1028 dpd->dpd_nextptr = 0;
1029 dpd->dpd_fsh = htole32(totlen);
1030
1031 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1032 ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
1033 sizeof (struct ex_dpd),
1034 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1035
1036 /*
1037 * No need to stall the download engine, we know it's
1038 * not busy right now.
1039 *
1040 * Fix up pointers in both the "soft" tx and the physical
1041 * tx list.
1042 */
1043 if (sc->tx_head != NULL) {
1044 prevdpd = sc->tx_tail->tx_dpd;
1045 offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
1046 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1047 offset, sizeof (struct ex_dpd),
1048 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1049 prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
1050 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1051 offset, sizeof (struct ex_dpd),
1052 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1053 sc->tx_tail->tx_next = txp;
1054 sc->tx_tail = txp;
1055 } else {
1056 sc->tx_tail = sc->tx_head = txp;
1057 }
1058
1059 #if NBPFILTER > 0
1060 /*
1061 * Pass packet to bpf if there is a listener.
1062 */
1063 if (ifp->if_bpf)
1064 bpf_mtap(ifp->if_bpf, mb_head);
1065 #endif
1066 }
1067 out:
/*
 * If anything was queued, flag the final DPD (EX_DPD_DNIND presumably
 * requests a down-complete indication -- confirm in elinkxlreg.h),
 * then point the download engine at the head of the chain.
 */
1068 if (sc->tx_head) {
1069 sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
1070 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1071 ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
1072 sizeof (struct ex_dpd),
1073 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1074 ifp->if_flags |= IFF_OACTIVE;
1075 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
1076 bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
1077 DPD_DMADDR(sc, sc->tx_head));
1078
1079 /* trigger watchdog */
1080 ifp->if_timer = 5;
1081 }
1082 }
1083
1084
/*
 * Interrupt handler.  Loops acknowledging and dispatching every pending
 * interrupt source until the interrupt latch stays clear.  Returns
 * nonzero iff at least one interrupt was serviced for this device.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore stray interrupts while disabled or deactivated. */
	if (sc->enabled == 0 ||
	    (sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
		return (0);

	for (;;) {
		/* Clear the interrupt latch before sampling status. */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		/*
		 * Done once no interesting status bits remain and the
		 * latch reads clear.
		 */
		if ((stat & S_MASK) == 0) {
			if ((stat & S_INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				       sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		/* Fatal adapter error: reset and reinitialize the chip. */
		if (stat & S_HOST_ERROR) {
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			/* Statistics counters near overflow; harvest them. */
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/*
			 * The download (transmit) engine finished the DPD
			 * chain: unload and free every transmitted mbuf.
			 */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			/* Examine the packet at the head of the rx chain. */
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					u_int16_t total_len;

					/* Drop frames with receive errors. */
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					/* Discard runt frames. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
					/* Hand the packet up the stack. */
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}

	/* no more interrupts */
	if (ret && ifp->if_snd.ifq_head)
		ex_start(ifp);
	return ret;
}
1262
1263 int
1264 ex_ioctl(ifp, cmd, data)
1265 struct ifnet *ifp;
1266 u_long cmd;
1267 caddr_t data;
1268 {
1269 struct ex_softc *sc = ifp->if_softc;
1270 struct ifreq *ifr = (struct ifreq *)data;
1271 int s, error;
1272
1273 s = splnet();
1274
1275 switch (cmd) {
1276 case SIOCSIFMEDIA:
1277 case SIOCGIFMEDIA:
1278 error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
1279 break;
1280
1281 default:
1282 error = ether_ioctl(ifp, cmd, data);
1283 if (error == ENETRESET) {
1284 /*
1285 * Multicast list has changed; set the hardware filter
1286 * accordingly.
1287 */
1288 ex_set_mc(sc);
1289 error = 0;
1290 }
1291 break;
1292 }
1293
1294 splx(s);
1295 return (error);
1296 }
1297
/*
 * Accumulate the chip's narrow on-board statistics counters into the
 * interface counters.  The code below also reads several counters it
 * does not accumulate, explicitly to keep them from overflowing (and
 * raising stats-overflow interrupts); the read order and window
 * selection must be preserved.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* UPPER_FRAMES_OK packs high bits of both rx and tx frame counts. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occured at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/* Nibbles of UBYTESOK extend the 16-bit rx/tx byte counters. */
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}
1338
1339 void
1340 ex_printstats(sc)
1341 struct ex_softc *sc;
1342 {
1343 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1344
1345 ex_getstats(sc);
1346 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1347 "%llu\n", (unsigned long long)ifp->if_ipackets,
1348 (unsigned long long)ifp->if_opackets,
1349 (unsigned long long)ifp->if_ierrors,
1350 (unsigned long long)ifp->if_oerrors,
1351 (unsigned long long)ifp->if_ibytes,
1352 (unsigned long long)ifp->if_obytes);
1353 }
1354
1355 void
1356 ex_tick(arg)
1357 void *arg;
1358 {
1359 struct ex_softc *sc = arg;
1360 int s;
1361
1362 if ((sc->sc_dev.dv_flags & DVF_ACTIVE) == 0)
1363 return;
1364
1365 s = splnet();
1366
1367 if (sc->ex_conf & EX_CONF_MII)
1368 mii_tick(&sc->ex_mii);
1369
1370 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1371 & S_COMMAND_IN_PROGRESS))
1372 ex_getstats(sc);
1373
1374 splx(s);
1375
1376 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1377 }
1378
1379 void
1380 ex_reset(sc)
1381 struct ex_softc *sc;
1382 {
1383 u_int16_t val = GLOBAL_RESET;
1384
1385 if (sc->ex_conf & EX_CONF_RESETHACK)
1386 val |= 0xff;
1387 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
1388 delay(400);
1389 ex_waitcmd(sc);
1390 }
1391
1392 void
1393 ex_watchdog(ifp)
1394 struct ifnet *ifp;
1395 {
1396 struct ex_softc *sc = ifp->if_softc;
1397
1398 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1399 ++sc->sc_ethercom.ec_if.if_oerrors;
1400
1401 ex_reset(sc);
1402 ex_init(ifp);
1403 }
1404
/*
 * Stop the interface: disable the receiver and transmitter, free all
 * pending transmit mbufs, and reinitialize the tx/rx descriptor lists.
 */
void
ex_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;	/* NOTE(review): unused in this body -- confirm intent */
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the chip before touching the descriptor rings. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Free any mbufs still queued for transmission. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	/* Rebuild the tx descriptor free list from scratch. */
	ex_init_txdescs(sc);

	/* Tear down and repopulate the receive chain. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	/* Clear any interrupt left latched. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1456
1457 static void
1458 ex_init_txdescs(sc)
1459 struct ex_softc *sc;
1460 {
1461 int i;
1462
1463 for (i = 0; i < EX_NDPD; i++) {
1464 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1465 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1466 if (i < EX_NDPD - 1)
1467 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1468 else
1469 sc->sc_txdescs[i].tx_next = NULL;
1470 }
1471 sc->tx_free = &sc->sc_txdescs[0];
1472 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1473 }
1474
1475
1476 int
1477 ex_activate(self, act)
1478 struct device *self;
1479 enum devact act;
1480 {
1481 struct ex_softc *sc = (void *) self;
1482 int s, error = 0;
1483
1484 s = splnet();
1485 switch (act) {
1486 case DVACT_ACTIVATE:
1487 error = EOPNOTSUPP;
1488 break;
1489
1490 case DVACT_DEACTIVATE:
1491 if (sc->ex_conf & EX_CONF_MII)
1492 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1493 MII_OFFSET_ANY);
1494 if_deactivate(&sc->sc_ethercom.ec_if);
1495 break;
1496 }
1497 splx(s);
1498
1499 return (error);
1500 }
1501
/*
 * Detach the device: unhook timers, PHYs, and network-stack state,
 * then release DMA resources in reverse order of allocation.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still held on the receive chain. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	/* Destroy per-packet DMA maps, then the descriptor memory itself. */
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);

	return (0);
}
1561
1562 /*
1563 * Before reboots, reset card completely.
1564 */
1565 static void
1566 ex_shutdown(arg)
1567 void *arg;
1568 {
1569 struct ex_softc *sc = arg;
1570
1571 ex_stop(&sc->sc_ethercom.ec_if, 0);
1572 }
1573
1574 /*
1575 * Read EEPROM data.
1576 * XXX what to do if EEPROM doesn't unbusy?
1577 */
1578 u_int16_t
1579 ex_read_eeprom(sc, offset)
1580 struct ex_softc *sc;
1581 int offset;
1582 {
1583 bus_space_tag_t iot = sc->sc_iot;
1584 bus_space_handle_t ioh = sc->sc_ioh;
1585 u_int16_t data = 0, cmd = READ_EEPROM;
1586 int off;
1587
1588 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
1589 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;
1590
1591 GO_WINDOW(0);
1592 if (ex_eeprom_busy(sc))
1593 goto out;
1594 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1595 cmd | (off + (offset & 0x3f)));
1596 if (ex_eeprom_busy(sc))
1597 goto out;
1598 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1599 out:
1600 return data;
1601 }
1602
1603 static int
1604 ex_eeprom_busy(sc)
1605 struct ex_softc *sc;
1606 {
1607 bus_space_tag_t iot = sc->sc_iot;
1608 bus_space_handle_t ioh = sc->sc_ioh;
1609 int i = 100;
1610
1611 while (i--) {
1612 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1613 EEPROM_BUSY))
1614 return 0;
1615 delay(100);
1616 }
1617 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1618 return (1);
1619 }
1620
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 * Returns 0 when a fresh cluster mbuf was attached to the descriptor;
 * returns 1 when the previous mbuf had to be recycled, or when there
 * was no previous mbuf and allocation failed entirely.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	/* Try to allocate a fresh cluster mbuf. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster: fall back to recycling the old mbuf. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		/* No mbuf at all: recycle the old one if there is one. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 * (Only needed for a new mbuf; a recycled one is already loaded.)
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	/* Reinitialize the UPD for the (possibly recycled) buffer. */
	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail's UPD to this descriptor. */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Flush buffer and UPD so the device sees the updates. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1704
1705 u_int32_t
1706 ex_mii_bitbang_read(self)
1707 struct device *self;
1708 {
1709 struct ex_softc *sc = (void *) self;
1710
1711 /* We're already in Window 4. */
1712 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1713 }
1714
1715 void
1716 ex_mii_bitbang_write(self, val)
1717 struct device *self;
1718 u_int32_t val;
1719 {
1720 struct ex_softc *sc = (void *) self;
1721
1722 /* We're already in Window 4. */
1723 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1724 }
1725
1726 int
1727 ex_mii_readreg(v, phy, reg)
1728 struct device *v;
1729 int phy, reg;
1730 {
1731 struct ex_softc *sc = (struct ex_softc *)v;
1732 int val;
1733
1734 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1735 return 0;
1736
1737 GO_WINDOW(4);
1738
1739 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1740
1741 GO_WINDOW(1);
1742
1743 return (val);
1744 }
1745
1746 void
1747 ex_mii_writereg(v, phy, reg, data)
1748 struct device *v;
1749 int phy;
1750 int reg;
1751 int data;
1752 {
1753 struct ex_softc *sc = (struct ex_softc *)v;
1754
1755 GO_WINDOW(4);
1756
1757 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);
1758
1759 GO_WINDOW(1);
1760 }
1761
1762 void
1763 ex_mii_statchg(v)
1764 struct device *v;
1765 {
1766 struct ex_softc *sc = (struct ex_softc *)v;
1767 bus_space_tag_t iot = sc->sc_iot;
1768 bus_space_handle_t ioh = sc->sc_ioh;
1769 int mctl;
1770
1771 GO_WINDOW(3);
1772 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1773 if (sc->ex_mii.mii_media_active & IFM_FDX)
1774 mctl |= MAC_CONTROL_FDX;
1775 else
1776 mctl &= ~MAC_CONTROL_FDX;
1777 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1778 GO_WINDOW(1); /* back to operating window */
1779 }
1780