elinkxl.c revision 1.92 1 /* $NetBSD: elinkxl.c,v 1.92 2006/11/05 07:59:21 itohy Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.92 2006/11/05 07:59:21 itohy Exp $");
41
42 #include "bpfilter.h"
43 #include "rnd.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/mbuf.h>
50 #include <sys/socket.h>
51 #include <sys/ioctl.h>
52 #include <sys/errno.h>
53 #include <sys/syslog.h>
54 #include <sys/select.h>
55 #include <sys/device.h>
56 #if NRND > 0
57 #include <sys/rnd.h>
58 #endif
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_ether.h>
65 #include <net/if_media.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
70 #endif
71
72 #include <machine/cpu.h>
73 #include <machine/bus.h>
74 #include <machine/intr.h>
75 #include <machine/endian.h>
76
77 #include <dev/mii/miivar.h>
78 #include <dev/mii/mii.h>
79 #include <dev/mii/mii_bitbang.h>
80
81 #include <dev/ic/elink3reg.h>
82 /* #include <dev/ic/elink3var.h> */
83 #include <dev/ic/elinkxlreg.h>
84 #include <dev/ic/elinkxlvar.h>
85
86 #ifdef DEBUG
87 int exdebug = 0;
88 #endif
89
90 /* ifmedia callbacks */
91 int ex_media_chg(struct ifnet *ifp);
92 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);
93
94 void ex_probe_media(struct ex_softc *);
95 void ex_set_filter(struct ex_softc *);
96 void ex_set_media(struct ex_softc *);
97 void ex_set_xcvr(struct ex_softc *, u_int16_t);
98 struct mbuf *ex_get(struct ex_softc *, int);
99 u_int16_t ex_read_eeprom(struct ex_softc *, int);
100 int ex_init(struct ifnet *);
101 void ex_read(struct ex_softc *);
102 void ex_reset(struct ex_softc *);
103 void ex_set_mc(struct ex_softc *);
104 void ex_getstats(struct ex_softc *);
105 void ex_printstats(struct ex_softc *);
106 void ex_tick(void *);
107
108 void ex_power(int, void *);
109
110 static int ex_eeprom_busy(struct ex_softc *);
111 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
112 static void ex_init_txdescs(struct ex_softc *);
113
114 static void ex_setup_tx(struct ex_softc *);
115 static void ex_shutdown(void *);
116 static void ex_start(struct ifnet *);
117 static void ex_txstat(struct ex_softc *);
118
119 int ex_mii_readreg(struct device *, int, int);
120 void ex_mii_writereg(struct device *, int, int, int);
121 void ex_mii_statchg(struct device *);
122
123 void ex_probemedia(struct ex_softc *);
124
125 /*
126 * Structure to map media-present bits in boards to ifmedia codes and
127 * printable media names. Used for table-driven ifmedia initialization.
128 */
129 struct ex_media {
130 int exm_mpbit; /* media present bit */
131 const char *exm_name; /* name of medium */
132 int exm_ifmedia; /* ifmedia word for medium */
133 int exm_epmedia; /* ELINKMEDIA_* constant */
134 };
135
136 /*
137 * Media table for 3c90x chips. Note that chips with MII have no
138 * `native' media.
139 */
140 struct ex_media ex_native_media[] = {
141 { ELINK_PCI_10BASE_T, "10baseT", IFM_ETHER|IFM_10_T,
142 ELINKMEDIA_10BASE_T },
143 { ELINK_PCI_10BASE_T, "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX,
144 ELINKMEDIA_10BASE_T },
145 { ELINK_PCI_AUI, "10base5", IFM_ETHER|IFM_10_5,
146 ELINKMEDIA_AUI },
147 { ELINK_PCI_BNC, "10base2", IFM_ETHER|IFM_10_2,
148 ELINKMEDIA_10BASE_2 },
149 { ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX,
150 ELINKMEDIA_100BASE_TX },
151 { ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
152 ELINKMEDIA_100BASE_TX },
153 { ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX,
154 ELINKMEDIA_100BASE_FX },
155 { ELINK_PCI_100BASE_MII,"manual", IFM_ETHER|IFM_MANUAL,
156 ELINKMEDIA_MII },
157 { ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4,
158 ELINKMEDIA_100BASE_T4 },
159 { 0, NULL, 0,
160 0 },
161 };
162
163 /*
164 * MII bit-bang glue.
165 */
166 u_int32_t ex_mii_bitbang_read(struct device *);
167 void ex_mii_bitbang_write(struct device *, u_int32_t);
168
169 const struct mii_bitbang_ops ex_mii_bitbang_ops = {
170 ex_mii_bitbang_read,
171 ex_mii_bitbang_write,
172 {
173 ELINK_PHY_DATA, /* MII_BIT_MDO */
174 ELINK_PHY_DATA, /* MII_BIT_MDI */
175 ELINK_PHY_CLK, /* MII_BIT_MDC */
176 ELINK_PHY_DIR, /* MII_BIT_DIR_HOST_PHY */
177 0, /* MII_BIT_DIR_PHY_HOST */
178 }
179 };
180
181 /*
182 * Back-end attach and configure.
183 */
184 void
185 ex_config(sc)
186 struct ex_softc *sc;
187 {
188 struct ifnet *ifp;
189 u_int16_t val;
190 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
191 bus_space_tag_t iot = sc->sc_iot;
192 bus_space_handle_t ioh = sc->sc_ioh;
193 int i, error, attach_stage;
194
195 callout_init(&sc->ex_mii_callout);
196
197 ex_reset(sc);
198
199 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
200 macaddr[0] = val >> 8;
201 macaddr[1] = val & 0xff;
202 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
203 macaddr[2] = val >> 8;
204 macaddr[3] = val & 0xff;
205 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
206 macaddr[4] = val >> 8;
207 macaddr[5] = val & 0xff;
208
209 aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
210 ether_sprintf(macaddr));
211
212 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
213 GO_WINDOW(2);
214 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
215 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
216 val |= ELINK_RESET_OPT_LEDPOLAR;
217 if (sc->ex_conf & EX_CONF_PHY_POWER)
218 val |= ELINK_RESET_OPT_PHYPOWER;
219 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
220 }
221 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
222 GO_WINDOW(0);
223 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
224 EX_XCVR_PWR_MAGICBITS);
225 }
226
227 attach_stage = 0;
228
229 /*
230 * Allocate the upload descriptors, and create and load the DMA
231 * map for them.
232 */
233 if ((error = bus_dmamem_alloc(sc->sc_dmat,
234 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
235 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
236 aprint_error(
237 "%s: can't allocate upload descriptors, error = %d\n",
238 sc->sc_dev.dv_xname, error);
239 goto fail;
240 }
241
242 attach_stage = 1;
243
244 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
245 EX_NUPD * sizeof (struct ex_upd), (caddr_t *)&sc->sc_upd,
246 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
247 aprint_error("%s: can't map upload descriptors, error = %d\n",
248 sc->sc_dev.dv_xname, error);
249 goto fail;
250 }
251
252 attach_stage = 2;
253
254 if ((error = bus_dmamap_create(sc->sc_dmat,
255 EX_NUPD * sizeof (struct ex_upd), 1,
256 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
257 &sc->sc_upd_dmamap)) != 0) {
258 aprint_error(
259 "%s: can't create upload desc. DMA map, error = %d\n",
260 sc->sc_dev.dv_xname, error);
261 goto fail;
262 }
263
264 attach_stage = 3;
265
266 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
267 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
268 BUS_DMA_NOWAIT)) != 0) {
269 aprint_error(
270 "%s: can't load upload desc. DMA map, error = %d\n",
271 sc->sc_dev.dv_xname, error);
272 goto fail;
273 }
274
275 attach_stage = 4;
276
277 /*
278 * Allocate the download descriptors, and create and load the DMA
279 * map for them.
280 */
281 if ((error = bus_dmamem_alloc(sc->sc_dmat,
282 EX_NDPD * sizeof (struct ex_dpd), PAGE_SIZE, 0, &sc->sc_dseg, 1,
283 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
284 aprint_error(
285 "%s: can't allocate download descriptors, error = %d\n",
286 sc->sc_dev.dv_xname, error);
287 goto fail;
288 }
289
290 attach_stage = 5;
291
292 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
293 EX_NDPD * sizeof (struct ex_dpd), (caddr_t *)&sc->sc_dpd,
294 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
295 aprint_error("%s: can't map download descriptors, error = %d\n",
296 sc->sc_dev.dv_xname, error);
297 goto fail;
298 }
299 memset(sc->sc_dpd, 0, EX_NDPD * sizeof (struct ex_dpd));
300
301 attach_stage = 6;
302
303 if ((error = bus_dmamap_create(sc->sc_dmat,
304 EX_NDPD * sizeof (struct ex_dpd), 1,
305 EX_NDPD * sizeof (struct ex_dpd), 0, BUS_DMA_NOWAIT,
306 &sc->sc_dpd_dmamap)) != 0) {
307 aprint_error(
308 "%s: can't create download desc. DMA map, error = %d\n",
309 sc->sc_dev.dv_xname, error);
310 goto fail;
311 }
312
313 attach_stage = 7;
314
315 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
316 sc->sc_dpd, EX_NDPD * sizeof (struct ex_dpd), NULL,
317 BUS_DMA_NOWAIT)) != 0) {
318 aprint_error(
319 "%s: can't load download desc. DMA map, error = %d\n",
320 sc->sc_dev.dv_xname, error);
321 goto fail;
322 }
323
324 attach_stage = 8;
325
326
327 /*
328 * Create the transmit buffer DMA maps.
329 */
330 for (i = 0; i < EX_NDPD; i++) {
331 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
332 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
333 &sc->sc_tx_dmamaps[i])) != 0) {
334 aprint_error(
335 "%s: can't create tx DMA map %d, error = %d\n",
336 sc->sc_dev.dv_xname, i, error);
337 goto fail;
338 }
339 }
340
341 attach_stage = 9;
342
343 /*
344 * Create the receive buffer DMA maps.
345 */
346 for (i = 0; i < EX_NUPD; i++) {
347 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
348 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
349 &sc->sc_rx_dmamaps[i])) != 0) {
350 aprint_error(
351 "%s: can't create rx DMA map %d, error = %d\n",
352 sc->sc_dev.dv_xname, i, error);
353 goto fail;
354 }
355 }
356
357 attach_stage = 10;
358
359 /*
360 * Create ring of upload descriptors, only once. The DMA engine
361 * will loop over this when receiving packets, stalling if it
362 * hits an UPD with a finished receive.
363 */
364 for (i = 0; i < EX_NUPD; i++) {
365 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
366 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
367 sc->sc_upd[i].upd_frags[0].fr_len =
368 htole32((MCLBYTES - 2) | EX_FR_LAST);
369 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
370 aprint_error("%s: can't allocate or map rx buffers\n",
371 sc->sc_dev.dv_xname);
372 goto fail;
373 }
374 }
375
376 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
377 EX_NUPD * sizeof (struct ex_upd),
378 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
379
380 ex_init_txdescs(sc);
381
382 attach_stage = 11;
383
384
385 GO_WINDOW(3);
386 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
387 if (val & ELINK_MEDIACAP_MII)
388 sc->ex_conf |= EX_CONF_MII;
389
390 ifp = &sc->sc_ethercom.ec_if;
391
392 /*
393 * Initialize our media structures and MII info. We'll
394 * probe the MII if we discover that we have one.
395 */
396 sc->ex_mii.mii_ifp = ifp;
397 sc->ex_mii.mii_readreg = ex_mii_readreg;
398 sc->ex_mii.mii_writereg = ex_mii_writereg;
399 sc->ex_mii.mii_statchg = ex_mii_statchg;
400 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
401 ex_media_stat);
402
403 if (sc->ex_conf & EX_CONF_MII) {
404 /*
405 * Find PHY, extract media information from it.
406 * First, select the right transceiver.
407 */
408 ex_set_xcvr(sc, val);
409
410 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
411 MII_PHY_ANY, MII_OFFSET_ANY, 0);
412 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
413 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
414 0, NULL);
415 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
416 } else {
417 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
418 }
419 } else
420 ex_probemedia(sc);
421
422 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
423 ifp->if_softc = sc;
424 ifp->if_start = ex_start;
425 ifp->if_ioctl = ex_ioctl;
426 ifp->if_watchdog = ex_watchdog;
427 ifp->if_init = ex_init;
428 ifp->if_stop = ex_stop;
429 ifp->if_flags =
430 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
431 sc->sc_if_flags = ifp->if_flags;
432 IFQ_SET_READY(&ifp->if_snd);
433
434 /*
435 * We can support 802.1Q VLAN-sized frames.
436 */
437 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
438
439 /*
440 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
441 */
442 if (sc->ex_conf & EX_CONF_90XB)
443 sc->sc_ethercom.ec_if.if_capabilities |=
444 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
445 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
446 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
447
448 if_attach(ifp);
449 ether_ifattach(ifp, macaddr);
450
451 GO_WINDOW(1);
452
453 sc->tx_start_thresh = 20;
454 sc->tx_succ_ok = 0;
455
456 /* TODO: set queues to 0 */
457
458 #if NRND > 0
459 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
460 RND_TYPE_NET, 0);
461 #endif
462
463 /* Establish callback to reset card when we reboot. */
464 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
465 if (sc->sc_sdhook == NULL)
466 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
467 sc->sc_dev.dv_xname);
468
469 /* Add a suspend hook to make sure we come back up after a resume. */
470 sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
471 ex_power, sc);
472 if (sc->sc_powerhook == NULL)
473 aprint_error("%s: WARNING: unable to establish power hook\n",
474 sc->sc_dev.dv_xname);
475
476 /* The attach is successful. */
477 sc->ex_flags |= EX_FLAGS_ATTACHED;
478 return;
479
480 fail:
481 /*
482 * Free any resources we've allocated during the failed attach
483 * attempt. Do this in reverse order and fall though.
484 */
485 switch (attach_stage) {
486 case 11:
487 {
488 struct ex_rxdesc *rxd;
489
490 for (i = 0; i < EX_NUPD; i++) {
491 rxd = &sc->sc_rxdescs[i];
492 if (rxd->rx_mbhead != NULL) {
493 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
494 m_freem(rxd->rx_mbhead);
495 }
496 }
497 }
498 /* FALLTHROUGH */
499
500 case 10:
501 for (i = 0; i < EX_NUPD; i++)
502 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
503 /* FALLTHROUGH */
504
505 case 9:
506 for (i = 0; i < EX_NDPD; i++)
507 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
508 /* FALLTHROUGH */
509 case 8:
510 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
511 /* FALLTHROUGH */
512
513 case 7:
514 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
515 /* FALLTHROUGH */
516
517 case 6:
518 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
519 EX_NDPD * sizeof (struct ex_dpd));
520 /* FALLTHROUGH */
521
522 case 5:
523 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
524 break;
525
526 case 4:
527 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
528 /* FALLTHROUGH */
529
530 case 3:
531 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
532 /* FALLTHROUGH */
533
534 case 2:
535 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
536 EX_NUPD * sizeof (struct ex_upd));
537 /* FALLTHROUGH */
538
539 case 1:
540 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
541 break;
542 }
543
544 }
545
546 /*
547 * Find the media present on non-MII chips.
548 */
549 void
550 ex_probemedia(sc)
551 struct ex_softc *sc;
552 {
553 bus_space_tag_t iot = sc->sc_iot;
554 bus_space_handle_t ioh = sc->sc_ioh;
555 struct ifmedia *ifm = &sc->ex_mii.mii_media;
556 struct ex_media *exm;
557 u_int16_t config1, reset_options, default_media;
558 int defmedia = 0;
559 const char *sep = "", *defmedianame = NULL;
560
561 GO_WINDOW(3);
562 config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
563 reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
564 GO_WINDOW(0);
565
566 default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;
567
568 aprint_normal("%s: ", sc->sc_dev.dv_xname);
569
570 /* Sanity check that there are any media! */
571 if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
572 aprint_error("no media present!\n");
573 ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
574 ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
575 return;
576 }
577
578 #define PRINT(str) aprint_normal("%s%s", sep, str); sep = ", "
579
580 for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
581 if (reset_options & exm->exm_mpbit) {
582 /*
583 * Default media is a little complicated. We
584 * support full-duplex which uses the same
585 * reset options bit.
586 *
587 * XXX Check EEPROM for default to FDX?
588 */
589 if (exm->exm_epmedia == default_media) {
590 if ((exm->exm_ifmedia & IFM_FDX) == 0) {
591 defmedia = exm->exm_ifmedia;
592 defmedianame = exm->exm_name;
593 }
594 } else if (defmedia == 0) {
595 defmedia = exm->exm_ifmedia;
596 defmedianame = exm->exm_name;
597 }
598 ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
599 NULL);
600 PRINT(exm->exm_name);
601 }
602 }
603
604 #undef PRINT
605
606 #ifdef DIAGNOSTIC
607 if (defmedia == 0)
608 panic("ex_probemedia: impossible");
609 #endif
610
611 aprint_normal(", default %s\n", defmedianame);
612 ifmedia_set(ifm, defmedia);
613 }
614
615 /*
616 * Setup transmitter parameters.
617 */
618 static void
619 ex_setup_tx(sc)
620 struct ex_softc *sc;
621 {
622 bus_space_tag_t iot = sc->sc_iot;
623 bus_space_handle_t ioh = sc->sc_ioh;
624
625 /*
626 * Disable reclaim threshold for 90xB, set free threshold to
627 * 6 * 256 = 1536 for 90x.
628 */
629 if (sc->ex_conf & EX_CONF_90XB)
630 bus_space_write_2(iot, ioh, ELINK_COMMAND,
631 ELINK_TXRECLTHRESH | 255);
632 else
633 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
634
635 /* Setup early transmission start threshold. */
636 bus_space_write_2(iot, ioh, ELINK_COMMAND,
637 ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
638 }
639
640 /*
641 * Bring device up.
642 */
643 int
644 ex_init(ifp)
645 struct ifnet *ifp;
646 {
647 struct ex_softc *sc = ifp->if_softc;
648 bus_space_tag_t iot = sc->sc_iot;
649 bus_space_handle_t ioh = sc->sc_ioh;
650 int i;
651 u_int16_t val;
652 int error = 0;
653
654 if ((error = ex_enable(sc)) != 0)
655 goto out;
656
657 ex_waitcmd(sc);
658 ex_stop(ifp, 0);
659
660 GO_WINDOW(2);
661
662 /* Turn on PHY power. */
663 if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
664 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
665 if (sc->ex_conf & EX_CONF_PHY_POWER)
666 val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
667 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
668 val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
669 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
670 }
671
672 /*
673 * Set the station address and clear the station mask. The latter
674 * is needed for 90x cards, 0 is the default for 90xB cards.
675 */
676 for (i = 0; i < ETHER_ADDR_LEN; i++) {
677 bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
678 LLADDR(ifp->if_sadl)[i]);
679 bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
680 }
681
682 GO_WINDOW(3);
683
684 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
685 ex_waitcmd(sc);
686 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
687 ex_waitcmd(sc);
688
689 /* Load Tx parameters. */
690 ex_setup_tx(sc);
691
692 bus_space_write_2(iot, ioh, ELINK_COMMAND,
693 SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);
694
695 bus_space_write_4(iot, ioh, ELINK_DMACTRL,
696 bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);
697
698 bus_space_write_2(iot, ioh, ELINK_COMMAND,
699 SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
700 bus_space_write_2(iot, ioh, ELINK_COMMAND,
701 SET_INTR_MASK | XL_WATCHED_INTERRUPTS);
702
703 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
704 if (sc->intr_ack)
705 (* sc->intr_ack)(sc);
706 ex_set_media(sc);
707 ex_set_mc(sc);
708
709
710 bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
711 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
712 bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
713 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
714 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);
715
716 ifp->if_flags |= IFF_RUNNING;
717 ifp->if_flags &= ~IFF_OACTIVE;
718 ex_start(ifp);
719 sc->sc_if_flags = ifp->if_flags;
720
721 GO_WINDOW(1);
722
723 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
724
725 out:
726 if (error) {
727 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
728 ifp->if_timer = 0;
729 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
730 }
731 return (error);
732 }
733
734 #define MCHASHSIZE 256
735 #define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
736 (MCHASHSIZE - 1))
737
738 /*
739 * Set multicast receive filter. Also take care of promiscuous mode
740 * here (XXX).
741 */
742 void
743 ex_set_mc(sc)
744 struct ex_softc *sc;
745 {
746 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
747 struct ethercom *ec = &sc->sc_ethercom;
748 struct ether_multi *enm;
749 struct ether_multistep estep;
750 int i;
751 u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;
752
753 if (ifp->if_flags & IFF_PROMISC) {
754 mask |= FIL_PROMISC;
755 goto allmulti;
756 }
757
758 ETHER_FIRST_MULTI(estep, ec, enm);
759 if (enm == NULL)
760 goto nomulti;
761
762 if ((sc->ex_conf & EX_CONF_90XB) == 0)
763 /* No multicast hash filtering. */
764 goto allmulti;
765
766 for (i = 0; i < MCHASHSIZE; i++)
767 bus_space_write_2(sc->sc_iot, sc->sc_ioh,
768 ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);
769
770 do {
771 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
772 ETHER_ADDR_LEN) != 0)
773 goto allmulti;
774
775 i = ex_mchash(enm->enm_addrlo);
776 bus_space_write_2(sc->sc_iot, sc->sc_ioh,
777 ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
778 ETHER_NEXT_MULTI(estep, enm);
779 } while (enm != NULL);
780 mask |= FIL_MULTIHASH;
781
782 nomulti:
783 ifp->if_flags &= ~IFF_ALLMULTI;
784 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
785 SET_RX_FILTER | mask);
786 return;
787
788 allmulti:
789 ifp->if_flags |= IFF_ALLMULTI;
790 mask |= FIL_MULTICAST;
791 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
792 SET_RX_FILTER | mask);
793 }
794
795
796 /*
797 * The Tx Complete interrupts occur only on errors,
798 * and this is the error handler.
799 */
800 static void
801 ex_txstat(sc)
802 struct ex_softc *sc;
803 {
804 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
805 bus_space_tag_t iot = sc->sc_iot;
806 bus_space_handle_t ioh = sc->sc_ioh;
807 int i, err = 0;
808
809 /*
810 * We need to read+write TX_STATUS until we get a 0 status
811 * in order to turn off the interrupt flag.
812 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
813 */
814 for (;;) {
815 i = bus_space_read_2(iot, ioh, ELINK_TIMER);
816 if ((i & TXS_COMPLETE) == 0)
817 break;
818 bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
819 err |= i;
820 }
821 err &= ~TXS_TIMER;
822
823 if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
824 || err == 0 /* should not happen, just in case */) {
825 /*
826 * Make sure the transmission is stopped.
827 */
828 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
829 for (i = 1000; i > 0; i--)
830 if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
831 ELINK_DMAC_DNINPROG) == 0)
832 break;
833
834 /*
835 * Reset the transmitter.
836 */
837 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
838
839 /* Resetting takes a while and we will do more than wait. */
840
841 ifp->if_flags &= ~IFF_OACTIVE;
842 ++sc->sc_ethercom.ec_if.if_oerrors;
843 printf("%s:%s%s%s", sc->sc_dev.dv_xname,
844 (err & TXS_UNDERRUN) ? " transmit underrun" : "",
845 (err & TXS_JABBER) ? " jabber" : "",
846 (err & TXS_RECLAIM) ? " reclaim" : "");
847 if (err == 0)
848 printf(" unknown Tx error");
849 printf(" (%x)", err);
850 if (err & TXS_UNDERRUN) {
851 printf(" @%d", sc->tx_start_thresh);
852 if (sc->tx_succ_ok < 256 &&
853 (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
854 > sc->tx_start_thresh) {
855 printf(", new threshold is %d", i);
856 sc->tx_start_thresh = i;
857 }
858 sc->tx_succ_ok = 0;
859 }
860 printf("\n");
861 if (err & TXS_MAX_COLLISION)
862 ++sc->sc_ethercom.ec_if.if_collisions;
863
864 /* Wait for TX_RESET to finish. */
865 ex_waitcmd(sc);
866
867 /* Reload Tx parameters. */
868 ex_setup_tx(sc);
869
870 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
871 if (sc->tx_head) {
872 ifp->if_flags |= IFF_OACTIVE;
873 bus_space_write_2(iot, ioh, ELINK_COMMAND,
874 ELINK_DNUNSTALL);
875 bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
876 DPD_DMADDR(sc, sc->tx_head));
877
878 /* retrigger watchdog */
879 ifp->if_timer = 5;
880 }
881 } else {
882 if (err & TXS_MAX_COLLISION)
883 ++sc->sc_ethercom.ec_if.if_collisions;
884 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
885 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
886 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
887 }
888 }
889
890 int
891 ex_media_chg(ifp)
892 struct ifnet *ifp;
893 {
894
895 if (ifp->if_flags & IFF_UP)
896 ex_init(ifp);
897 return 0;
898 }
899
/*
 * Select the transceiver in the internal config register based on the
 * media capabilities word (`media', from ELINK_W3_RESET_OPTIONS).
 * The XCVR field occupies the upper half of the 32-bit register, hence
 * the `+ 16' shifts.  Note the checks are not exclusive, so a later
 * match (100baseTX auto, then 100baseFX) overrides an earlier one.
 */
void
ex_set_xcvr(sc, media)
	struct ex_softc *sc;
	const u_int16_t media;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
			<< (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}
923
/*
 * Program the chip for the currently selected media.
 *
 * Sets full-duplex in the MAC control register when the active media
 * (MII) or the configured media (non-MII) requests it.  MII boards
 * then just reselect the transceiver and let the PHY handle the rest;
 * native-media boards stop the transceiver, enable the per-medium
 * options (jabber guard, link beat, SQE), and write the medium into
 * the internal config register.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		u_int16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* Non-MII: quiesce the transceiver before switching media. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the selected medium in the internal config register. */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}
1007
1008 /*
1009 * Get currently-selected media from card.
1010 * (if_media callback, may be called before interface is brought up).
1011 */
1012 void
1013 ex_media_stat(ifp, req)
1014 struct ifnet *ifp;
1015 struct ifmediareq *req;
1016 {
1017 struct ex_softc *sc = ifp->if_softc;
1018 u_int16_t help;
1019
1020 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
1021 if (sc->ex_conf & EX_CONF_MII) {
1022 mii_pollstat(&sc->ex_mii);
1023 req->ifm_status = sc->ex_mii.mii_media_status;
1024 req->ifm_active = sc->ex_mii.mii_media_active;
1025 } else {
1026 GO_WINDOW(4);
1027 req->ifm_status = IFM_AVALID;
1028 req->ifm_active =
1029 sc->ex_mii.mii_media.ifm_cur->ifm_media;
1030 help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
1031 ELINK_W4_MEDIA_TYPE);
1032 if (help & LINKBEAT_DETECT)
1033 req->ifm_status |= IFM_ACTIVE;
1034 GO_WINDOW(1);
1035 }
1036 }
1037 }
1038
1039
1040
1041 /*
1042 * Start outputting on the interface.
1043 */
/*
 * Start outputting on the interface.
 *
 * Dequeues packets from ifp->if_snd, maps each one for DMA, builds a
 * 3c90x download descriptor (DPD) per packet, links the DPDs onto the
 * active tx list, and finally hands the chain to the chip's download
 * engine.  Bails out early if a download is already outstanding
 * (sc->tx_head != NULL) or there are no free tx descriptors.
 */
static void
ex_start(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, totlen, segment, error;
	u_int32_t csum_flags;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later (EFBIG recopy below),
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				printf("aborting\n");
				goto out;
			}
			/* Need a cluster if the packet won't fit in MHLEN. */
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					printf("aborting\n");
					goto out;
				}
			}
			/* Flatten the whole chain into the single new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			printf("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		/* One fragment descriptor per DMA segment. */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			fr->fr_len = htole32(dmamap->dm_segs[segment].ds_len);
			totlen += dmamap->dm_segs[segment].ds_len;
		}
		/* Flag the final fragment so the NIC knows where to stop. */
		fr--;
		fr->fr_len |= htole32(EX_FR_LAST);
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			/* 90xB: ask the chip to insert checksums on the fly. */
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			/* Non-90xB chips can't checksum; nothing should ask. */
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		/* Request a download-complete interrupt on the last DPD. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)sc->tx_tail->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}
1241
1242
/*
 * Interrupt handler.  Loops on ELINK_STATUS until no watched interrupt
 * condition remains, acknowledging and servicing each in turn
 * (adapter failure, tx done, statistics overflow, download complete,
 * upload complete).  Returns nonzero if the interrupt was ours.
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore interrupts when not running or mid-detach. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    sc->sc_dev.dv_xname);
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			/* Fatal adapter error: full reset and reinit. */
			printf("%s: adapter failure (%x)\n",
			    sc->sc_dev.dv_xname, stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
		}
		if (stat & UPD_STATS) {
			/* Statistics counter overflow; drain the counters. */
			ex_getstats(sc);
		}
		if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/* Unload and free every completed tx packet. */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (caddr_t)txp->tx_dpd - (caddr_t)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

			/* Process received packets until the chain is empty. */
 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((caddr_t)upd - (caddr_t)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					u_int16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					/* Runt frame: discard. */
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * Set the incoming checksum information for the packet.
		 */
		if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
		    (pktstat & EX_UPD_IPCHECKED) != 0) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (pktstat & EX_UPD_IPCKSUMERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (pktstat & EX_UPD_TCPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (pktstat & EX_UPD_TCPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (pktstat & EX_UPD_UDPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (pktstat & EX_UPD_UDPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    sc->sc_dev.dv_xname);
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				printf("%s: receive stalled\n",
				    sc->sc_dev.dv_xname);
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		/* Feed the interrupt status into the entropy pool. */
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}
1446
1447 int
1448 ex_ioctl(ifp, cmd, data)
1449 struct ifnet *ifp;
1450 u_long cmd;
1451 caddr_t data;
1452 {
1453 struct ex_softc *sc = ifp->if_softc;
1454 struct ifreq *ifr = (struct ifreq *)data;
1455 int s, error;
1456
1457 s = splnet();
1458
1459 switch (cmd) {
1460 case SIOCSIFMEDIA:
1461 case SIOCGIFMEDIA:
1462 error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
1463 break;
1464 case SIOCSIFFLAGS:
1465 /* If the interface is up and running, only modify the receive
1466 * filter when setting promiscuous or debug mode. Otherwise
1467 * fall through to ether_ioctl, which will reset the chip.
1468 */
1469 #define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
1470 if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
1471 == (IFF_UP|IFF_RUNNING))
1472 && ((ifp->if_flags & (~RESETIGN))
1473 == (sc->sc_if_flags & (~RESETIGN)))) {
1474 ex_set_mc(sc);
1475 error = 0;
1476 break;
1477 #undef RESETIGN
1478 }
1479 /* FALLTHROUGH */
1480 default:
1481 error = ether_ioctl(ifp, cmd, data);
1482 if (error == ENETRESET) {
1483 /*
1484 * Multicast list has changed; set the hardware filter
1485 * accordingly.
1486 */
1487 if (ifp->if_flags & IFF_RUNNING)
1488 ex_set_mc(sc);
1489 error = 0;
1490 }
1491 break;
1492 }
1493
1494 sc->sc_if_flags = ifp->if_flags;
1495 splx(s);
1496 return (error);
1497 }
1498
/*
 * Accumulate the chip's statistics counters into the interface
 * counters.  The on-chip counters are small (8/16 bit) and clear on
 * read, so they must be drained periodically or the chip raises
 * statistics-overflow interrupts.  Leaves the chip in window 1.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/* UPPER_FRAMES_OK holds high bits for both rx and tx frame counts. */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occurred at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}
1543
1544 void
1545 ex_printstats(sc)
1546 struct ex_softc *sc;
1547 {
1548 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1549
1550 ex_getstats(sc);
1551 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1552 "%llu\n", (unsigned long long)ifp->if_ipackets,
1553 (unsigned long long)ifp->if_opackets,
1554 (unsigned long long)ifp->if_ierrors,
1555 (unsigned long long)ifp->if_oerrors,
1556 (unsigned long long)ifp->if_ibytes,
1557 (unsigned long long)ifp->if_obytes);
1558 }
1559
1560 void
1561 ex_tick(arg)
1562 void *arg;
1563 {
1564 struct ex_softc *sc = arg;
1565 int s;
1566
1567 if (!device_is_active(&sc->sc_dev))
1568 return;
1569
1570 s = splnet();
1571
1572 if (sc->ex_conf & EX_CONF_MII)
1573 mii_tick(&sc->ex_mii);
1574
1575 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1576 & COMMAND_IN_PROGRESS))
1577 ex_getstats(sc);
1578
1579 splx(s);
1580
1581 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1582 }
1583
1584 void
1585 ex_reset(sc)
1586 struct ex_softc *sc;
1587 {
1588 u_int16_t val = GLOBAL_RESET;
1589
1590 if (sc->ex_conf & EX_CONF_RESETHACK)
1591 val |= 0x10;
1592 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
1593 /*
1594 * XXX apparently the command in progress bit can't be trusted
1595 * during a reset, so we just always wait this long. Fortunately
1596 * we normally only reset the chip during autoconfig.
1597 */
1598 delay(100000);
1599 ex_waitcmd(sc);
1600 }
1601
1602 void
1603 ex_watchdog(ifp)
1604 struct ifnet *ifp;
1605 {
1606 struct ex_softc *sc = ifp->if_softc;
1607
1608 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1609 ++sc->sc_ethercom.ec_if.if_oerrors;
1610
1611 ex_reset(sc);
1612 ex_init(ifp);
1613 }
1614
/*
 * Stop the interface: disable rx/tx on the chip, release every pending
 * transmit mbuf, rebuild the free tx descriptor list, recycle the
 * receive buffers, and (optionally) power the card down.
 */
void
ex_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the chip before touching the descriptor lists. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Free all packets still queued for transmit. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((caddr_t)tx->tx_dpd - (caddr_t)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain, recycling existing buffers. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}
1670
1671 static void
1672 ex_init_txdescs(sc)
1673 struct ex_softc *sc;
1674 {
1675 int i;
1676
1677 for (i = 0; i < EX_NDPD; i++) {
1678 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1679 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1680 if (i < EX_NDPD - 1)
1681 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1682 else
1683 sc->sc_txdescs[i].tx_next = NULL;
1684 }
1685 sc->tx_free = &sc->sc_txdescs[0];
1686 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1687 }
1688
1689
1690 int
1691 ex_activate(self, act)
1692 struct device *self;
1693 enum devact act;
1694 {
1695 struct ex_softc *sc = (void *) self;
1696 int s, error = 0;
1697
1698 s = splnet();
1699 switch (act) {
1700 case DVACT_ACTIVATE:
1701 error = EOPNOTSUPP;
1702 break;
1703
1704 case DVACT_DEACTIVATE:
1705 if (sc->ex_conf & EX_CONF_MII)
1706 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1707 MII_OFFSET_ANY);
1708 if_deactivate(&sc->sc_ethercom.ec_if);
1709 break;
1710 }
1711 splx(s);
1712
1713 return (error);
1714 }
1715
/*
 * Detach the device: tear down the MII, network hooks, and all DMA
 * resources allocated at attach time.  Ordering matters: maps must be
 * unloaded before they are destroyed, and memory unmapped before it is
 * freed.  Returns 0.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release any mbufs still held in the receive descriptors. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	/* Tear down the download (tx) descriptor DMA area. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	/* Tear down the upload (rx) descriptor DMA area. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return (0);
}
1773
1774 /*
1775 * Before reboots, reset card completely.
1776 */
1777 static void
1778 ex_shutdown(arg)
1779 void *arg;
1780 {
1781 struct ex_softc *sc = arg;
1782
1783 ex_stop(&sc->sc_ethercom.ec_if, 1);
1784 /*
1785 * Make sure the interface is powered up when we reboot,
1786 * otherwise firmware on some systems gets really confused.
1787 */
1788 (void) ex_enable(sc);
1789 }
1790
1791 /*
1792 * Read EEPROM data.
1793 * XXX what to do if EEPROM doesn't unbusy?
1794 */
1795 u_int16_t
1796 ex_read_eeprom(sc, offset)
1797 struct ex_softc *sc;
1798 int offset;
1799 {
1800 bus_space_tag_t iot = sc->sc_iot;
1801 bus_space_handle_t ioh = sc->sc_ioh;
1802 u_int16_t data = 0, cmd = READ_EEPROM;
1803 int off;
1804
1805 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
1806 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;
1807
1808 GO_WINDOW(0);
1809 if (ex_eeprom_busy(sc))
1810 goto out;
1811 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1812 cmd | (off + (offset & 0x3f)));
1813 if (ex_eeprom_busy(sc))
1814 goto out;
1815 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1816 out:
1817 return data;
1818 }
1819
1820 static int
1821 ex_eeprom_busy(sc)
1822 struct ex_softc *sc;
1823 {
1824 bus_space_tag_t iot = sc->sc_iot;
1825 bus_space_handle_t ioh = sc->sc_ioh;
1826 int i = 100;
1827
1828 while (i--) {
1829 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1830 EEPROM_BUSY))
1831 return 0;
1832 delay(100);
1833 }
1834 printf("\n%s: eeprom stays busy.\n", sc->sc_dev.dv_xname);
1835 return (1);
1836 }
1837
1838 /*
1839 * Create a new rx buffer and add it to the 'soft' rx list.
1840 */
/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 *
 * Returns 0 when a fresh mbuf cluster was attached to the descriptor,
 * 1 when allocation failed and either the old mbuf was recycled or
 * (if there was no old mbuf) nothing could be attached.
 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster; fall back to recycling the old mbuf. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		/* No mbuf at all; recycle the old one if we have it. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 * (A recycled mbuf keeps its existing mapping.)
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (caddr_t)sc->rx_tail->rx_upd - (caddr_t)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((caddr_t)rxd->rx_upd - (caddr_t)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1922
1923 u_int32_t
1924 ex_mii_bitbang_read(self)
1925 struct device *self;
1926 {
1927 struct ex_softc *sc = (void *) self;
1928
1929 /* We're already in Window 4. */
1930 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1931 }
1932
1933 void
1934 ex_mii_bitbang_write(self, val)
1935 struct device *self;
1936 u_int32_t val;
1937 {
1938 struct ex_softc *sc = (void *) self;
1939
1940 /* We're already in Window 4. */
1941 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1942 }
1943
1944 int
1945 ex_mii_readreg(v, phy, reg)
1946 struct device *v;
1947 int phy, reg;
1948 {
1949 struct ex_softc *sc = (struct ex_softc *)v;
1950 int val;
1951
1952 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1953 return 0;
1954
1955 GO_WINDOW(4);
1956
1957 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1958
1959 GO_WINDOW(1);
1960
1961 return (val);
1962 }
1963
1964 void
1965 ex_mii_writereg(v, phy, reg, data)
1966 struct device *v;
1967 int phy;
1968 int reg;
1969 int data;
1970 {
1971 struct ex_softc *sc = (struct ex_softc *)v;
1972
1973 GO_WINDOW(4);
1974
1975 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);
1976
1977 GO_WINDOW(1);
1978 }
1979
1980 void
1981 ex_mii_statchg(v)
1982 struct device *v;
1983 {
1984 struct ex_softc *sc = (struct ex_softc *)v;
1985 bus_space_tag_t iot = sc->sc_iot;
1986 bus_space_handle_t ioh = sc->sc_ioh;
1987 int mctl;
1988
1989 GO_WINDOW(3);
1990 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
1991 if (sc->ex_mii.mii_media_active & IFM_FDX)
1992 mctl |= MAC_CONTROL_FDX;
1993 else
1994 mctl &= ~MAC_CONTROL_FDX;
1995 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
1996 GO_WINDOW(1); /* back to operating window */
1997 }
1998
1999 int
2000 ex_enable(sc)
2001 struct ex_softc *sc;
2002 {
2003 if (sc->enabled == 0 && sc->enable != NULL) {
2004 if ((*sc->enable)(sc) != 0) {
2005 printf("%s: de/vice enable failed\n",
2006 sc->sc_dev.dv_xname);
2007 return (EIO);
2008 }
2009 sc->enabled = 1;
2010 }
2011 return (0);
2012 }
2013
2014 void
2015 ex_disable(sc)
2016 struct ex_softc *sc;
2017 {
2018 if (sc->enabled == 1 && sc->disable != NULL) {
2019 (*sc->disable)(sc);
2020 sc->enabled = 0;
2021 }
2022 }
2023
2024 void
2025 ex_power(why, arg)
2026 int why;
2027 void *arg;
2028 {
2029 struct ex_softc *sc = (void *)arg;
2030 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2031 int s;
2032
2033 s = splnet();
2034 switch (why) {
2035 case PWR_SUSPEND:
2036 case PWR_STANDBY:
2037 ex_stop(ifp, 0);
2038 if (sc->power != NULL)
2039 (*sc->power)(sc, why);
2040 break;
2041 case PWR_RESUME:
2042 if (ifp->if_flags & IFF_UP) {
2043 if (sc->power != NULL)
2044 (*sc->power)(sc, why);
2045 ex_init(ifp);
2046 }
2047 break;
2048 case PWR_SOFTSUSPEND:
2049 case PWR_SOFTSTANDBY:
2050 case PWR_SOFTRESUME:
2051 break;
2052 }
2053 splx(s);
2054 }
2055