/*	$NetBSD: elinkxl.c,v 1.12 1999/05/14 15:54:16 drochner Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#if BYTE_ORDER == BIG_ENDIAN
#include <machine/bswap.h>
#define htopci(x) bswap32(x)
#define pcitoh(x) bswap32(x)
#else
#define htopci(x) (x)
#define pcitoh(x) (x)
#endif
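
/*
 * The download (DPD) and upload (UPD) descriptors are fetched by the
 * card's bus-master DMA engine in little-endian (PCI) byte order, so
 * every field stored into or read from a descriptor goes through the
 * htopci()/pcitoh() macros above; they are no-ops on little-endian
 * hosts and byte swaps on big-endian ones.
 */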

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif

/* ifmedia callbacks */
int ex_media_chg __P((struct ifnet *ifp));
void ex_media_stat __P((struct ifnet *ifp, struct ifmediareq *req));

void ex_probe_media __P((struct ex_softc *));
void ex_set_filter __P((struct ex_softc *));
void ex_set_media __P((struct ex_softc *));
struct mbuf *ex_get __P((struct ex_softc *, int));
u_int16_t ex_read_eeprom __P((struct ex_softc *, int));
void ex_init __P((struct ex_softc *));
void ex_read __P((struct ex_softc *));
void ex_reset __P((struct ex_softc *));
void ex_set_mc __P((struct ex_softc *));
void ex_getstats __P((struct ex_softc *));
void ex_printstats __P((struct ex_softc *));
void ex_tick __P((void *));

static int ex_eeprom_busy __P((struct ex_softc *));
static int ex_add_rxbuf __P((struct ex_softc *, struct ex_rxdesc *));
static void ex_init_txdescs __P((struct ex_softc *));

static void ex_shutdown __P((void *));
static void ex_start __P((struct ifnet *));
static void ex_txstat __P((struct ex_softc *));
static u_int16_t ex_mchash __P((u_char *));
static void ex_mii_writebits __P((struct ex_softc *, u_int, int));

void ex_mii_setbit __P((void *, u_int16_t));
void ex_mii_clrbit __P((void *, u_int16_t));
u_int16_t ex_mii_readbit __P((void *, u_int16_t));
int ex_mii_readreg __P((struct device *, int, int));
void ex_mii_writereg __P((struct device *, int, int, int));
void ex_mii_statchg __P((struct device *));

void ex_probemedia __P((struct ex_softc *));

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	    ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	    ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	    ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	    ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	    ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	    ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	    ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	    ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	    ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	    0 },
};
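
/*
 * ex_probemedia() below walks this table against the window 3
 * reset-options register: every medium whose exm_mpbit is set gets an
 * ifmedia_add(), and the entry whose exm_epmedia matches the default
 * media field of the internal-configuration register becomes the
 * initially selected medium.
 */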

/*
 * Back-end attach and configure.
 */
void
ex_config(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp;
	u_int16_t val;
	u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_dma_segment_t useg, dseg;
	int urseg, drseg, i, error, attach_stage;

	ex_reset(sc);

	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(macaddr));

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
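	/*
	 * This is the usual four-step bus_dma sequence (bus_dmamem_alloc,
	 * bus_dmamem_map, bus_dmamap_create, bus_dmamap_load), repeated
	 * below for the download descriptors.  attach_stage records how
	 * far we got, so that the failure path at the end of this
	 * function can unwind exactly the resources already allocated.
	 */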
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), NBPG, 0, &useg, 1, &urseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &useg, urseg,
	    EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map upload descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		printf("%s: can't create upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load upload desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), NBPG, 0, &dseg, 1, &drseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't allocate download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &dseg, drseg,
	    EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: can't map download descriptors, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}
	bzero(sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd));

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NDPD * sizeof (struct ex_dpd), 1,
	    EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		printf("%s: can't create download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load download desc. DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail;
	}

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			printf("%s: can't create tx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			printf("%s: can't create rx DMA map %d, error = %d\n",
			    sc->sc_dev.dv_xname, i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once.  The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htopci((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			printf("%s: can't allocate or map rx buffers\n",
			    sc->sc_dev.dv_xname);
			goto fail;
		}
	}
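	/*
	 * ex_add_rxbuf() did the list linking above: each new UPD was
	 * appended to the tail of the chain (the previous tail's
	 * upd_nextptr pointed at it), and the interrupt handler recycles
	 * completed buffers back onto the tail the same way, which is
	 * what keeps the ring supplied with buffers.
	 */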
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, 0, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 */
		mii_phy_probe(&sc->sc_dev, &sc->ex_mii, 0xffffffff);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

	/* Establish callback to reset card when we reboot. */
	shutdownhook_establish(ex_shutdown, sc);
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */

	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &dseg, drseg);
		/* FALLTHROUGH, to free the upload side as well */

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &useg, urseg);
		break;
	}
}
/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	printf("%s: ", sc->sc_dev.dv_xname);

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		printf("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define PRINT(s)	printf("%s%s", sep, s); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	printf(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
/*
 * Bring device up.
 */
void
ex_init(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s, i;

	s = splnet();

	ex_waitcmd(sc);
	ex_stop(sc);

	/*
	 * Set the station address and clear the station mask.  The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	GO_WINDOW(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    LLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_RD_0_MASK | S_MASK);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, SET_INTR_MASK | S_MASK);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);

	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);

	GO_WINDOW(1);

	splx(s);

	timeout(ex_tick, sc, hz);
}
/*
 * Multicast hash filter according to the 3Com spec.
 */
static u_int16_t
ex_mchash(addr)
	u_char *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_char c;

	/* Compute CRC for the address value. */
	crc = 0xffffffff;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = addr[i];
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* Return the filter bit position. */
	return (crc & 0x000000ff);
}
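/*
 * A note on the loop above: after `crc <<= 1' bit 0 of crc is always
 * clear, and carry is 1 whenever the XOR branch is taken, so
 * `(crc ^ 0x04c11db6) | carry' is equivalent to XORing with 0x04c11db7,
 * the standard CRC-32 polynomial.  The low 8 bits of the result select
 * one of the 256 hash-filter bits, which ex_set_mc() programs into the
 * card with the ELINK_SETHASHFILBIT command.
 */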
/*
 * Set multicast receive filter.  Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(sc)
	register struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC)
		mask |= FIL_PROMISC;

	if (!(ifp->if_flags & IFF_MULTICAST))
		goto out;

	/*
	 * Only the 90xB has the 256-bit multicast hash filter; older
	 * chips (and an ALLMULTI request) get all-multicast mode.
	 */
	if (!(sc->ex_conf & EX_CONF_90XB) || (ifp->if_flags & IFF_ALLMULTI)) {
		mask |= (ifp->if_flags & IFF_MULTICAST) ? FIL_MULTICAST : 0;
	} else {
		ETHER_FIRST_MULTI(estep, ec, enm);
		while (enm != NULL) {
			/*
			 * A range of multicast addresses can't be
			 * expressed in the hash filter; punt.
			 */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN) != 0)
				goto out;
			i = ex_mchash(enm->enm_addrlo);
			bus_space_write_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
			ETHER_NEXT_MULTI(estep, enm);
		}
		mask |= FIL_MULTIHASH;
	}
 out:
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
/*
 * Process the transmit status stack after a transmit-complete interrupt,
 * restarting the transmitter and adjusting the start threshold as needed.
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 */
	while ((i = bus_space_read_1(iot, ioh, ELINK_TXSTATUS)) & TXS_COMPLETE) {
		bus_space_write_1(iot, ioh, ELINK_TXSTATUS, 0x0);

		if (i & TXS_JABBER) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: jabber (%x)\n",
				    sc->sc_dev.dv_xname, i);
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_UNDERRUN) {
			++sc->sc_ethercom.ec_if.if_oerrors;
			if (sc->sc_ethercom.ec_if.if_flags & IFF_DEBUG)
				printf("%s: fifo underrun (%x) @%d\n",
				    sc->sc_dev.dv_xname, i,
				    sc->tx_start_thresh);
			/*
			 * Raise the start threshold unless we just had
			 * 100 or more error-free transmits in a row.
			 */
			if (sc->tx_succ_ok < 100)
				sc->tx_start_thresh = min(ETHER_MAX_LEN,
				    sc->tx_start_thresh + 20);
			sc->tx_succ_ok = 0;
			ex_init(sc);
			/* TODO: be more subtle here */
		} else if (i & TXS_MAX_COLLISION) {
			++sc->sc_ethercom.ec_if.if_collisions;
			bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
			sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
		} else
			sc->tx_succ_ok = (sc->tx_succ_ok + 1) & 127;
	}
}
int
ex_media_chg(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		ex_init(sc);
	return 0;
}

void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int config0, config1;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		GO_WINDOW(3);

		config0 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG);
		config1 = (u_int)bus_space_read_2(iot, ioh,
		    ELINK_W3_INTERNAL_CONFIG + 2);

		config1 = config1 & ~CONFIG_MEDIAMASK;
		config1 |= (ELINKMEDIA_MII << CONFIG_MEDIAMASK_SHIFT);

		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
		bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2,
		    config1);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	config0 = (u_int)bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	config1 = (u_int)bus_space_read_2(iot, ioh,
	    ELINK_W3_INTERNAL_CONFIG + 2);

	config1 = config1 & ~CONFIG_MEDIAMASK;
	config1 |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    CONFIG_MEDIAMASK_SHIFT);

	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG, config0);
	bus_space_write_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2, config1);
}
/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;

	if (sc->ex_conf & EX_CONF_MII) {
		mii_pollstat(&sc->ex_mii);
		req->ifm_status = sc->ex_mii.mii_media_status;
		req->ifm_active = sc->ex_mii.mii_media_active;
	} else {
		GO_WINDOW(4);
		req->ifm_status = IFM_AVALID;
		req->ifm_active = sc->ex_mii.mii_media.ifm_cur->ifm_media;
		if (bus_space_read_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_W4_MEDIA_TYPE) & LINKBEAT_DETECT)
			req->ifm_status |= IFM_ACTIVE;
		GO_WINDOW(1);
	}
}
/*
 * Start outputting on the interface.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	bus_dmamap_t dmamap;
	int offset, totlen;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_free != NULL) {
		struct mbuf *mb_head;
		int segment, error;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs;
		    segment++, fr++) {
			fr->fr_addr = htopci(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htopci(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		fr--;
		fr->fr_len |= htopci(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htopci(totlen);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((caddr_t)prevdpd - (caddr_t)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htopci(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htopci(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
/*
 * Interrupt handler: acknowledge and dispatch all pending interrupt
 * sources.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);
		if (!(stat & S_MASK))
			break;
		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & S_MASK));
		ret = 1;
		if (stat & S_HOST_ERROR) {
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			bus_space_write_2(iot, ioh, ELINK_COMMAND,
			    C_INTR_LATCH);
			ex_reset(sc);
			ex_init(sc);
			return 1;
		}
		if (stat & S_TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & S_UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & S_DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		if (stat & S_UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;
			pktstat = pcitoh(upd->upd_pktstatus);

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					struct ether_header *eh;
					u_int16_t total_len;

					if (pktstat & EX_UPD_ERR) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len -
					    sizeof(struct ether_header);
					eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
					if (ifp->if_bpf) {
						bpf_tap(ifp->if_bpf,
						    mtod(m, caddr_t),
						    total_len);
						/*
						 * Only pass this packet up
						 * if it is for us.
						 */
						if ((ifp->if_flags &
						    IFF_PROMISC) &&
						    (eh->ether_dhost[0] & 1)
						    == 0 &&
						    bcmp(eh->ether_dhost,
						    LLADDR(ifp->if_sadl),
						    sizeof(eh->ether_dhost))
						    != 0) {
							m_freem(m);
							goto rcvloop;
						}
					}
#endif /* NBPFILTER > 0 */
					m->m_data +=
					    sizeof(struct ether_header);
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA
			 * engine stalled.  We could be more subtle about
			 * this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(sc);
			} else if (bus_space_read_4(iot, ioh,
			    ELINK_UPPKTSTATUS) & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}
	}
	if (ret) {
		bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);
		if (ifp->if_snd.ifq_head != NULL)
			ex_start(ifp);
	}
	return ret;
}
int
ex_ioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ex_init(sc);
			arp_ifinit(&sc->sc_ethercom.ec_if, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			ex_init(sc);
			break;
		    }
#endif
		default:
			ex_init(sc);
			break;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			ex_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ex_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Deal with other flags that change hardware
			 * state, i.e. IFF_PROMISC.
			 */
			ex_set_mc(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
/*
 * Read the statistics counters from the card and accumulate them into
 * the interface counters; the remaining counters are read here purely
 * to clear them and avoid statistics-overflow interrupts.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	ifp->if_ibytes += bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	ifp->if_obytes += bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	bus_space_read_1(iot, ioh, TX_DEFERRALS);
	bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	bus_space_read_1(iot, ioh, TX_NO_SQE);
	bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	upperok = bus_space_read_1(iot, ioh, ELINK_W4_UBYTESOK);
	ifp->if_ibytes += (upperok & 0x0f) << 16;
	ifp->if_obytes += (upperok & 0xf0) << 12;
	GO_WINDOW(1);
}

void
ex_printstats(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %ld out %ld ierror %ld oerror %ld ibytes %ld obytes %ld\n",
	    ifp->if_ipackets, ifp->if_opackets, ifp->if_ierrors,
	    ifp->if_oerrors, ifp->if_ibytes, ifp->if_obytes);
}
/*
 * Once-a-second timer: drive the MII and harvest statistics.
 */
void
ex_tick(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	int s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & S_COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	timeout(ex_tick, sc, hz);
}

/*
 * Issue a global reset and wait for it to complete.
 */
void
ex_reset(sc)
	struct ex_softc *sc;
{
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, GLOBAL_RESET);
	delay(400);
	ex_waitcmd(sc);
}

/*
 * Transmit watchdog timeout; reset and reinitialize the card.
 */
void
ex_watchdog(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(sc);
}
/*
 * Stop the interface: disable receive and transmit, free any in-flight
 * transmit mbufs, and reinitialize the descriptor lists.
 */
void
ex_stop(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, C_INTR_LATCH);

	untimeout(ex_tick, sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Rebuild the list of free transmit descriptors.
 */
static void
ex_init_txdescs(sc)
	struct ex_softc *sc;
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD - 1];
}

/*
 * Before reboots, reset card completely.
 */
static void
ex_shutdown(arg)
	void *arg;
{
	register struct ex_softc *sc = arg;

	ex_stop(sc);
}
/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
u_int16_t
ex_read_eeprom(sc, offset)
	struct ex_softc *sc;
	int offset;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t data = 0;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	switch (sc->ex_bustype) {
	case EX_BUS_PCI:
		bus_space_write_1(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    READ_EEPROM | (offset & 0x3f));
		break;
	case EX_BUS_CARDBUS:
		bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
		    0x230 + (offset & 0x3f));
		break;
	}
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
	return (1);
}
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Reserve two bytes in front of the data; with the 14-byte
	 * Ethernet header in place, this leaves the IP header that
	 * follows 32-bit aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htopci(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htopci(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htopci(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
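/*
 * Note that ex_add_rxbuf() returns non-zero when it had to recycle the
 * old mbuf because no new cluster was available; the interrupt handler
 * uses that return value to drop the received packet rather than pass
 * up a buffer it is about to reuse.  The 2-byte offset above also
 * matches the (MCLBYTES - 2) fragment length programmed into each UPD
 * at attach time, so the card never DMAs past the end of the cluster.
 */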
void
ex_mii_setbit(v, bit)
	void *v;
	u_int16_t bit;
{
	struct ex_softc *sc = v;
	u_int16_t val;

	val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
	val |= bit;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

void
ex_mii_clrbit(v, bit)
	void *v;
	u_int16_t bit;
{
	struct ex_softc *sc = v;
	u_int16_t val;

	val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
	val &= ~bit;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

u_int16_t
ex_mii_readbit(v, bit)
	void *v;
	u_int16_t bit;
{
	struct ex_softc *sc = v;
	u_int16_t val;

	val = bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT);
	return (val & bit);
}

/*
 * The reason why all this stuff below is here, is that we need a special
 * readreg function.  It needs to check whether we're accessing the
 * internal PHY on 905B-TX boards; reads of any other PHY address must
 * fail immediately, because 905B-TX boards seem to return garbage from
 * the MII if you try to access non-existing PHYs.
 */
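/*
 * For reference, the helpers above and below bit-bang a management frame
 * on the window 4 physical-management register.  Assuming the standard
 * IEEE 802.3 clause 22 layout, a frame looks like:
 *
 *	<32-cycle preamble> <ST> <OP> <PHYAD[4:0]> <REGAD[4:0]> <TA> <DATA[15:0]>
 *
 * with the address and data fields shifted MSB first.  The preamble here
 * is clocked with the data line released (ELINK_PHY_DIR cleared), and for
 * reads the line is released again before the turnaround so that the PHY
 * can drive the sixteen data bits.
 */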
int
ex_mii_readreg(v, phy, reg)
	struct device *v;
	int phy;
	int reg;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int val = 0;
	int err = 0;
	int i;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, 0);

	ex_mii_clrbit(sc, ELINK_PHY_DIR);
	for (i = 0; i < 32; i++) {
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		ex_mii_setbit(sc, ELINK_PHY_CLK);
	}
	ex_mii_writebits(sc, MII_COMMAND_START, 2);
	ex_mii_writebits(sc, MII_COMMAND_READ, 2);
	ex_mii_writebits(sc, phy, 5);
	ex_mii_writebits(sc, reg, 5);

	ex_mii_clrbit(sc, ELINK_PHY_DIR);
	ex_mii_clrbit(sc, ELINK_PHY_CLK);
	ex_mii_setbit(sc, ELINK_PHY_CLK);
	ex_mii_clrbit(sc, ELINK_PHY_CLK);

	err = ex_mii_readbit(sc, ELINK_PHY_DATA);
	ex_mii_setbit(sc, ELINK_PHY_CLK);

	for (i = 0; i < 16; i++) {
		val <<= 1;
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		if (err == 0 && ex_mii_readbit(sc, ELINK_PHY_DATA))
			val |= 1;
		ex_mii_setbit(sc, ELINK_PHY_CLK);
	}
	ex_mii_clrbit(sc, ELINK_PHY_CLK);
	ex_mii_setbit(sc, ELINK_PHY_CLK);

	GO_WINDOW(1);

	return (err ? 0 : val);
}

/*
 * Clock out the `nbits' low-order bits of `data', MSB first.
 */
static void
ex_mii_writebits(sc, data, nbits)
	struct ex_softc *sc;
	unsigned int data;
	int nbits;
{
	int i;

	ex_mii_setbit(sc, ELINK_PHY_DIR);
	for (i = 1 << (nbits - 1); i; i = i >> 1) {
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		ex_mii_readbit(sc, ELINK_PHY_CLK);
		if (data & i)
			ex_mii_setbit(sc, ELINK_PHY_DATA);
		else
			ex_mii_clrbit(sc, ELINK_PHY_DATA);
		ex_mii_setbit(sc, ELINK_PHY_CLK);
		ex_mii_readbit(sc, ELINK_PHY_CLK);
	}
}

void
ex_mii_writereg(v, phy, reg, data)
	struct device *v;
	int phy;
	int reg;
	int data;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	int i;

	GO_WINDOW(4);

	ex_mii_clrbit(sc, ELINK_PHY_DIR);
	for (i = 0; i < 32; i++) {
		ex_mii_clrbit(sc, ELINK_PHY_CLK);
		ex_mii_setbit(sc, ELINK_PHY_CLK);
	}
	ex_mii_writebits(sc, MII_COMMAND_START, 2);
	ex_mii_writebits(sc, MII_COMMAND_WRITE, 2);
	ex_mii_writebits(sc, phy, 5);
	ex_mii_writebits(sc, reg, 5);
	ex_mii_writebits(sc, MII_COMMAND_ACK, 2);
	ex_mii_writebits(sc, data, 16);

	ex_mii_clrbit(sc, ELINK_PHY_CLK);
	ex_mii_setbit(sc, ELINK_PHY_CLK);

	GO_WINDOW(1);
}

void
ex_mii_statchg(v)
	struct device *v;
{
	struct ex_softc *sc = (struct ex_softc *)v;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	/* XXX Update ifp->if_baudrate */

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}