elinkxl.c revision 1.101 1 /* $NetBSD: elinkxl.c,v 1.101 2008/04/08 12:07:26 cegger Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Frank van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.101 2008/04/08 12:07:26 cegger Exp $");
41
42 #include "bpfilter.h"
43 #include "rnd.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/mbuf.h>
50 #include <sys/socket.h>
51 #include <sys/ioctl.h>
52 #include <sys/errno.h>
53 #include <sys/syslog.h>
54 #include <sys/select.h>
55 #include <sys/device.h>
56 #if NRND > 0
57 #include <sys/rnd.h>
58 #endif
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_ether.h>
65 #include <net/if_media.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
70 #endif
71
72 #include <sys/cpu.h>
73 #include <sys/bus.h>
74 #include <sys/intr.h>
75 #include <machine/endian.h>
76
77 #include <dev/mii/miivar.h>
78 #include <dev/mii/mii.h>
79 #include <dev/mii/mii_bitbang.h>
80
81 #include <dev/ic/elink3reg.h>
82 /* #include <dev/ic/elink3var.h> */
83 #include <dev/ic/elinkxlreg.h>
84 #include <dev/ic/elinkxlvar.h>
85
86 #ifdef DEBUG
87 int exdebug = 0;
88 #endif
89
90 /* ifmedia callbacks */
91 int ex_media_chg(struct ifnet *ifp);
92 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);
93
94 void ex_probe_media(struct ex_softc *);
95 void ex_set_filter(struct ex_softc *);
96 void ex_set_media(struct ex_softc *);
97 void ex_set_xcvr(struct ex_softc *, u_int16_t);
98 struct mbuf *ex_get(struct ex_softc *, int);
99 u_int16_t ex_read_eeprom(struct ex_softc *, int);
100 int ex_init(struct ifnet *);
101 void ex_read(struct ex_softc *);
102 void ex_reset(struct ex_softc *);
103 void ex_set_mc(struct ex_softc *);
104 void ex_getstats(struct ex_softc *);
105 void ex_printstats(struct ex_softc *);
106 void ex_tick(void *);
107
108 void ex_power(int, void *);
109
110 static int ex_eeprom_busy(struct ex_softc *);
111 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
112 static void ex_init_txdescs(struct ex_softc *);
113
114 static void ex_setup_tx(struct ex_softc *);
115 static void ex_shutdown(void *);
116 static void ex_start(struct ifnet *);
117 static void ex_txstat(struct ex_softc *);
118
119 int ex_mii_readreg(struct device *, int, int);
120 void ex_mii_writereg(struct device *, int, int, int);
121 void ex_mii_statchg(struct device *);
122
123 void ex_probemedia(struct ex_softc *);
124
125 /*
126 * Structure to map media-present bits in boards to ifmedia codes and
127 * printable media names. Used for table-driven ifmedia initialization.
128 */
129 struct ex_media {
130 int exm_mpbit; /* media present bit */
131 const char *exm_name; /* name of medium */
132 int exm_ifmedia; /* ifmedia word for medium */
133 int exm_epmedia; /* ELINKMEDIA_* constant */
134 };
135
/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 *
 * Scanned in order by ex_probemedia(); each entry maps a media-present
 * bit from the W3 reset-options register to an ifmedia word and the
 * ELINKMEDIA_* code programmed into the internal config register.
 * The all-NULL entry terminates the table.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },					/* table terminator */
};
162
/*
 * MII bit-bang glue.
 *
 * Read/write accessors for the PHY management pins plus the bit
 * assignments handed to the common mii_bitbang code; the driver
 * toggles ELINK_PHY_* bits in the W4 PHY management register.
 */
u_int32_t ex_mii_bitbang_read(struct device *);
void ex_mii_bitbang_write(struct device *, u_int32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
180
181 /*
182 * Back-end attach and configure.
183 */
184 void
185 ex_config(sc)
186 struct ex_softc *sc;
187 {
188 struct ifnet *ifp;
189 u_int16_t val;
190 u_int8_t macaddr[ETHER_ADDR_LEN] = {0};
191 bus_space_tag_t iot = sc->sc_iot;
192 bus_space_handle_t ioh = sc->sc_ioh;
193 int i, error, attach_stage;
194
195 callout_init(&sc->ex_mii_callout, 0);
196
197 ex_reset(sc);
198
199 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
200 macaddr[0] = val >> 8;
201 macaddr[1] = val & 0xff;
202 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
203 macaddr[2] = val >> 8;
204 macaddr[3] = val & 0xff;
205 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
206 macaddr[4] = val >> 8;
207 macaddr[5] = val & 0xff;
208
209 aprint_normal_dev(&sc->sc_dev, "MAC address %s\n",
210 ether_sprintf(macaddr));
211
212 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
213 GO_WINDOW(2);
214 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
215 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
216 val |= ELINK_RESET_OPT_LEDPOLAR;
217 if (sc->ex_conf & EX_CONF_PHY_POWER)
218 val |= ELINK_RESET_OPT_PHYPOWER;
219 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
220 }
221 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
222 GO_WINDOW(0);
223 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
224 EX_XCVR_PWR_MAGICBITS);
225 }
226
227 attach_stage = 0;
228
229 /*
230 * Allocate the upload descriptors, and create and load the DMA
231 * map for them.
232 */
233 if ((error = bus_dmamem_alloc(sc->sc_dmat,
234 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
235 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
236 aprint_error_dev(&sc->sc_dev,
237 "can't allocate upload descriptors, error = %d\n",
238 error);
239 goto fail;
240 }
241
242 attach_stage = 1;
243
244 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
245 EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
246 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
247 aprint_error_dev(&sc->sc_dev, "can't map upload descriptors, error = %d\n", error);
248 goto fail;
249 }
250
251 attach_stage = 2;
252
253 if ((error = bus_dmamap_create(sc->sc_dmat,
254 EX_NUPD * sizeof (struct ex_upd), 1,
255 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
256 &sc->sc_upd_dmamap)) != 0) {
257 aprint_error_dev(&sc->sc_dev,
258 "can't create upload desc. DMA map, error = %d\n",
259 error);
260 goto fail;
261 }
262
263 attach_stage = 3;
264
265 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
266 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
267 BUS_DMA_NOWAIT)) != 0) {
268 aprint_error_dev(&sc->sc_dev,
269 "can't load upload desc. DMA map, error = %d\n",
270 error);
271 goto fail;
272 }
273
274 attach_stage = 4;
275
276 /*
277 * Allocate the download descriptors, and create and load the DMA
278 * map for them.
279 */
280 if ((error = bus_dmamem_alloc(sc->sc_dmat,
281 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
282 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
283 aprint_error_dev(&sc->sc_dev,
284 "can't allocate download descriptors, error = %d\n",
285 error);
286 goto fail;
287 }
288
289 attach_stage = 5;
290
291 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
292 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
293 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
294 aprint_error_dev(&sc->sc_dev, "can't map download descriptors, error = %d\n",
295 error);
296 goto fail;
297 }
298 memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);
299
300 attach_stage = 6;
301
302 if ((error = bus_dmamap_create(sc->sc_dmat,
303 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
304 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
305 &sc->sc_dpd_dmamap)) != 0) {
306 aprint_error_dev(&sc->sc_dev,
307 "can't create download desc. DMA map, error = %d\n",
308 error);
309 goto fail;
310 }
311
312 attach_stage = 7;
313
314 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
315 sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
316 BUS_DMA_NOWAIT)) != 0) {
317 aprint_error_dev(&sc->sc_dev,
318 "can't load download desc. DMA map, error = %d\n",
319 error);
320 goto fail;
321 }
322 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
323 DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);
324
325 attach_stage = 8;
326
327
328 /*
329 * Create the transmit buffer DMA maps.
330 */
331 for (i = 0; i < EX_NDPD; i++) {
332 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
333 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
334 &sc->sc_tx_dmamaps[i])) != 0) {
335 aprint_error_dev(&sc->sc_dev,
336 "can't create tx DMA map %d, error = %d\n",
337 i, error);
338 goto fail;
339 }
340 }
341
342 attach_stage = 9;
343
344 /*
345 * Create the receive buffer DMA maps.
346 */
347 for (i = 0; i < EX_NUPD; i++) {
348 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
349 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
350 &sc->sc_rx_dmamaps[i])) != 0) {
351 aprint_error_dev(&sc->sc_dev,
352 "can't create rx DMA map %d, error = %d\n",
353 i, error);
354 goto fail;
355 }
356 }
357
358 attach_stage = 10;
359
360 /*
361 * Create ring of upload descriptors, only once. The DMA engine
362 * will loop over this when receiving packets, stalling if it
363 * hits an UPD with a finished receive.
364 */
365 for (i = 0; i < EX_NUPD; i++) {
366 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
367 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
368 sc->sc_upd[i].upd_frags[0].fr_len =
369 htole32((MCLBYTES - 2) | EX_FR_LAST);
370 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
371 aprint_error_dev(&sc->sc_dev, "can't allocate or map rx buffers\n");
372 goto fail;
373 }
374 }
375
376 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
377 EX_NUPD * sizeof (struct ex_upd),
378 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
379
380 ex_init_txdescs(sc);
381
382 attach_stage = 11;
383
384
385 GO_WINDOW(3);
386 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
387 if (val & ELINK_MEDIACAP_MII)
388 sc->ex_conf |= EX_CONF_MII;
389
390 ifp = &sc->sc_ethercom.ec_if;
391
392 /*
393 * Initialize our media structures and MII info. We'll
394 * probe the MII if we discover that we have one.
395 */
396 sc->ex_mii.mii_ifp = ifp;
397 sc->ex_mii.mii_readreg = ex_mii_readreg;
398 sc->ex_mii.mii_writereg = ex_mii_writereg;
399 sc->ex_mii.mii_statchg = ex_mii_statchg;
400 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
401 ex_media_stat);
402
403 if (sc->ex_conf & EX_CONF_MII) {
404 /*
405 * Find PHY, extract media information from it.
406 * First, select the right transceiver.
407 */
408 ex_set_xcvr(sc, val);
409
410 mii_attach(&sc->sc_dev, &sc->ex_mii, 0xffffffff,
411 MII_PHY_ANY, MII_OFFSET_ANY, 0);
412 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
413 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
414 0, NULL);
415 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
416 } else {
417 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
418 }
419 } else
420 ex_probemedia(sc);
421
422 strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
423 ifp->if_softc = sc;
424 ifp->if_start = ex_start;
425 ifp->if_ioctl = ex_ioctl;
426 ifp->if_watchdog = ex_watchdog;
427 ifp->if_init = ex_init;
428 ifp->if_stop = ex_stop;
429 ifp->if_flags =
430 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
431 sc->sc_if_flags = ifp->if_flags;
432 IFQ_SET_READY(&ifp->if_snd);
433
434 /*
435 * We can support 802.1Q VLAN-sized frames.
436 */
437 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
438
439 /*
440 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
441 */
442 if (sc->ex_conf & EX_CONF_90XB)
443 sc->sc_ethercom.ec_if.if_capabilities |=
444 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
445 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
446 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
447
448 if_attach(ifp);
449 ether_ifattach(ifp, macaddr);
450
451 GO_WINDOW(1);
452
453 sc->tx_start_thresh = 20;
454 sc->tx_succ_ok = 0;
455
456 /* TODO: set queues to 0 */
457
458 #if NRND > 0
459 rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
460 RND_TYPE_NET, 0);
461 #endif
462
463 /* Establish callback to reset card when we reboot. */
464 sc->sc_sdhook = shutdownhook_establish(ex_shutdown, sc);
465 if (sc->sc_sdhook == NULL)
466 aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish shutdown hook\n");
467
468 /* Add a suspend hook to make sure we come back up after a resume. */
469 sc->sc_powerhook = powerhook_establish(device_xname(&sc->sc_dev),
470 ex_power, sc);
471 if (sc->sc_powerhook == NULL)
472 aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish power hook\n");
473
474 /* The attach is successful. */
475 sc->ex_flags |= EX_FLAGS_ATTACHED;
476 return;
477
478 fail:
479 /*
480 * Free any resources we've allocated during the failed attach
481 * attempt. Do this in reverse order and fall though.
482 */
483 switch (attach_stage) {
484 case 11:
485 {
486 struct ex_rxdesc *rxd;
487
488 for (i = 0; i < EX_NUPD; i++) {
489 rxd = &sc->sc_rxdescs[i];
490 if (rxd->rx_mbhead != NULL) {
491 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
492 m_freem(rxd->rx_mbhead);
493 }
494 }
495 }
496 /* FALLTHROUGH */
497
498 case 10:
499 for (i = 0; i < EX_NUPD; i++)
500 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
501 /* FALLTHROUGH */
502
503 case 9:
504 for (i = 0; i < EX_NDPD; i++)
505 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
506 /* FALLTHROUGH */
507 case 8:
508 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
509 /* FALLTHROUGH */
510
511 case 7:
512 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
513 /* FALLTHROUGH */
514
515 case 6:
516 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
517 EX_NDPD * sizeof (struct ex_dpd));
518 /* FALLTHROUGH */
519
520 case 5:
521 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
522 break;
523
524 case 4:
525 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
526 /* FALLTHROUGH */
527
528 case 3:
529 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
530 /* FALLTHROUGH */
531
532 case 2:
533 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
534 EX_NUPD * sizeof (struct ex_upd));
535 /* FALLTHROUGH */
536
537 case 1:
538 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
539 break;
540 }
541
542 }
543
544 /*
545 * Find the media present on non-MII chips.
546 */
/*
 * Find the media present on non-MII chips.
 *
 * Reads the media-present bits from the W3 reset-options register and
 * the EEPROM-configured default medium from the internal config
 * register, adds each present medium from ex_native_media[] to the
 * ifmedia list, and selects a default.
 */
void
ex_probemedia(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	u_int16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/* Both registers live in window 3; restore window 0 afterwards. */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	/* EEPROM-configured default medium, as an ELINKMEDIA_* code. */
	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	aprint_normal_dev(&sc->sc_dev, "");

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error("no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

#define PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				/* No default yet; use the first present. */
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}
612
613 /*
614 * Setup transmitter parameters.
615 */
616 static void
617 ex_setup_tx(sc)
618 struct ex_softc *sc;
619 {
620 bus_space_tag_t iot = sc->sc_iot;
621 bus_space_handle_t ioh = sc->sc_ioh;
622
623 /*
624 * Disable reclaim threshold for 90xB, set free threshold to
625 * 6 * 256 = 1536 for 90x.
626 */
627 if (sc->ex_conf & EX_CONF_90XB)
628 bus_space_write_2(iot, ioh, ELINK_COMMAND,
629 ELINK_TXRECLTHRESH | 255);
630 else
631 bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);
632
633 /* Setup early transmission start threshold. */
634 bus_space_write_2(iot, ioh, ELINK_COMMAND,
635 ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
636 }
637
638 /*
639 * Bring device up.
640 */
/*
 * Bring device up.
 *
 * if_init callback: enables the device, programs the station address,
 * resets and reconfigures the RX/TX engines, sets media and the
 * receive filter, starts the interface and the MII tick callout.
 * Returns 0 on success or an errno from ex_enable().
 */
int
ex_init(ifp)
	struct ifnet *ifp;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	u_int16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset the receiver and transmitter, waiting for each command. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	/* Disable early-RX threshold, enable early-RX on upload engine. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupts we care about. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	/* Ack stale interrupts, plus any bus-specific ack hook. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	/* Enable statistics, transmitter, upload ring and receiver. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	/* Start the one-second MII/statistics tick. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(&sc->sc_dev, "interface not running\n");
	}
	return (error);
}
731
/* Multicast hash: top 8 bits of the big-endian CRC-32 of the address. */
#define MCHASHSIZE 256
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 *
 * 90xB parts get a 256-bit hash filter; plain 90x parts fall back to
 * receiving all multicast.  A range entry (addrlo != addrhi) also
 * forces the all-multicast fallback.
 */
void
ex_set_mc(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	u_int16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	/* Clear every bit of the hash filter before setting ours. */
	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		/* Address ranges can't be hashed; receive all multicast. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}
792
793
794 /*
795 * The Tx Complete interrupts occur only on errors,
796 * and this is the error handler.
797 */
/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 *
 * Drains the TX_STATUS FIFO, and on serious errors (underrun, jabber,
 * reclaim) stalls and resets the transmitter, bumping the early-start
 * threshold on underruns.  Finally re-enables the transmitter and,
 * if a packet is still queued, restarts the download engine on it.
 */
static void
ex_txstat(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		/* Writing pops this status entry off the FIFO. */
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	/* Drop the timer byte accumulated along with the status bits. */
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		/* Bounded wait for the download engine to go idle. */
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		printf("%s:%s%s%s", device_xname(&sc->sc_dev),
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			printf(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			printf(" @%d", sc->tx_start_thresh);
			/*
			 * Raise the start threshold (capped at the max
			 * frame size) until 256 packets go out cleanly.
			 */
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				printf(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		printf("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}
888
889 int
890 ex_media_chg(ifp)
891 struct ifnet *ifp;
892 {
893
894 if (ifp->if_flags & IFF_UP)
895 ex_init(ifp);
896 return 0;
897 }
898
899 void
900 ex_set_xcvr(sc, media)
901 struct ex_softc *sc;
902 const u_int16_t media;
903 {
904 bus_space_tag_t iot = sc->sc_iot;
905 bus_space_handle_t ioh = sc->sc_ioh;
906 u_int32_t icfg;
907
908 /*
909 * We're already in Window 3
910 */
911 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
912 icfg &= ~(CONFIG_XCVR_SEL << 16);
913 if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
914 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
915 if (media & ELINK_MEDIACAP_100BASETX)
916 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
917 if (media & ELINK_MEDIACAP_100BASEFX)
918 icfg |= ELINKMEDIA_100BASE_FX
919 << (CONFIG_XCVR_SEL_SHIFT + 16);
920 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
921 }
922
/*
 * Program the currently selected media into the chip.
 *
 * Sets full/half duplex in the MAC control register, then either
 * hands off to the MII layer (MII-equipped boards) or switches the
 * native transceiver on/off and records the medium in the internal
 * config register.
 */
void
ex_set_media(sc)
	struct ex_softc *sc;
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int32_t configreg;

	/* Duplex comes from the MII state on MII boards, else ifmedia. */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		u_int16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	/* Non-MII: stop the transceiver before switching media. */
	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the selected medium in the internal config register. */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}
1006
1007 /*
1008 * Get currently-selected media from card.
1009 * (if_media callback, may be called before interface is brought up).
1010 */
/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 *
 * MII boards poll the PHY; native-media boards report the configured
 * medium and derive link state from the W4 linkbeat-detect bit.
 * Nothing is reported unless the interface is both up and running.
 */
void
ex_media_stat(ifp, req)
	struct ifnet *ifp;
	struct ifmediareq *req;
{
	struct ex_softc *sc = ifp->if_softc;
	u_int16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			/* Restore the normal operating window. */
			GO_WINDOW(1);
		}
	}
}
1037
1038
1039
1040 /*
1041 * Start outputting on the interface.
1042 */
1043 static void
1044 ex_start(ifp)
1045 struct ifnet *ifp;
1046 {
1047 struct ex_softc *sc = ifp->if_softc;
1048 bus_space_tag_t iot = sc->sc_iot;
1049 bus_space_handle_t ioh = sc->sc_ioh;
1050 volatile struct ex_fraghdr *fr = NULL;
1051 volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
1052 struct ex_txdesc *txp;
1053 struct mbuf *mb_head;
1054 bus_dmamap_t dmamap;
1055 int m_csumflags, offset, seglen, totlen, segment, error;
1056 u_int32_t csum_flags;
1057
1058 if (sc->tx_head || sc->tx_free == NULL)
1059 return;
1060
1061 txp = NULL;
1062
1063 /*
1064 * We're finished if there is nothing more to add to the list or if
1065 * we're all filled up with buffers to transmit.
1066 */
1067 while (sc->tx_free != NULL) {
1068 /*
1069 * Grab a packet to transmit.
1070 */
1071 IFQ_DEQUEUE(&ifp->if_snd, mb_head);
1072 if (mb_head == NULL)
1073 break;
1074
1075 /*
1076 * mb_head might be updated later,
1077 * so preserve csum_flags here.
1078 */
1079 m_csumflags = mb_head->m_pkthdr.csum_flags;
1080
1081 /*
1082 * Get pointer to next available tx desc.
1083 */
1084 txp = sc->tx_free;
1085 dmamap = txp->tx_dmamap;
1086
1087 /*
1088 * Go through each of the mbufs in the chain and initialize
1089 * the transmit buffer descriptors with the physical address
1090 * and size of the mbuf.
1091 */
1092 reload:
1093 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
1094 mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1095 switch (error) {
1096 case 0:
1097 /* Success. */
1098 break;
1099
1100 case EFBIG:
1101 {
1102 struct mbuf *mn;
1103
1104 /*
1105 * We ran out of segments. We have to recopy this
1106 * mbuf chain first. Bail out if we can't get the
1107 * new buffers.
1108 */
1109 printf("%s: too many segments, ", device_xname(&sc->sc_dev));
1110
1111 MGETHDR(mn, M_DONTWAIT, MT_DATA);
1112 if (mn == NULL) {
1113 m_freem(mb_head);
1114 printf("aborting\n");
1115 goto out;
1116 }
1117 if (mb_head->m_pkthdr.len > MHLEN) {
1118 MCLGET(mn, M_DONTWAIT);
1119 if ((mn->m_flags & M_EXT) == 0) {
1120 m_freem(mn);
1121 m_freem(mb_head);
1122 printf("aborting\n");
1123 goto out;
1124 }
1125 }
1126 m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
1127 mtod(mn, void *));
1128 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
1129 m_freem(mb_head);
1130 mb_head = mn;
1131 printf("retrying\n");
1132 goto reload;
1133 }
1134
1135 default:
1136 /*
1137 * Some other problem; report it.
1138 */
1139 aprint_error_dev(&sc->sc_dev, "can't load mbuf chain, error = %d\n",
1140 error);
1141 m_freem(mb_head);
1142 goto out;
1143 }
1144
1145 /*
1146 * remove our tx desc from freelist.
1147 */
1148 sc->tx_free = txp->tx_next;
1149 txp->tx_next = NULL;
1150
1151 fr = &txp->tx_dpd->dpd_frags[0];
1152 totlen = 0;
1153 for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
1154 fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
1155 seglen = dmamap->dm_segs[segment].ds_len;
1156 fr->fr_len = htole32(seglen);
1157 totlen += seglen;
1158 }
1159 if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
1160 (m_csumflags & M_CSUM_IPv4) != 0)) {
1161 /*
1162 * Pad short packets to avoid ip4csum-tx bug.
1163 *
1164 * XXX Should we still consider if such short
1165 * (36 bytes or less) packets might already
1166 * occupy EX_NTFRAG (== 32) fragements here?
1167 */
1168 KASSERT(segment < EX_NTFRAGS);
1169 fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
1170 seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
1171 fr->fr_len = htole32(EX_FR_LAST | seglen);
1172 totlen += seglen;
1173 } else {
1174 fr--;
1175 fr->fr_len |= htole32(EX_FR_LAST);
1176 }
1177 txp->tx_mbhead = mb_head;
1178
1179 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1180 BUS_DMASYNC_PREWRITE);
1181
1182 dpd = txp->tx_dpd;
1183 dpd->dpd_nextptr = 0;
1184 dpd->dpd_fsh = htole32(totlen);
1185
1186 /* Byte-swap constants so compiler can optimize. */
1187
1188 if (sc->ex_conf & EX_CONF_90XB) {
1189 csum_flags = 0;
1190
1191 if (m_csumflags & M_CSUM_IPv4)
1192 csum_flags |= htole32(EX_DPD_IPCKSUM);
1193
1194 if (m_csumflags & M_CSUM_TCPv4)
1195 csum_flags |= htole32(EX_DPD_TCPCKSUM);
1196 else if (m_csumflags & M_CSUM_UDPv4)
1197 csum_flags |= htole32(EX_DPD_UDPCKSUM);
1198
1199 dpd->dpd_fsh |= csum_flags;
1200 } else {
1201 KDASSERT((mb_head->m_pkthdr.csum_flags &
1202 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
1203 }
1204
1205 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1206 ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
1207 sizeof (struct ex_dpd),
1208 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1209
1210 /*
1211 * No need to stall the download engine, we know it's
1212 * not busy right now.
1213 *
1214 * Fix up pointers in both the "soft" tx and the physical
1215 * tx list.
1216 */
1217 if (sc->tx_head != NULL) {
1218 prevdpd = sc->tx_tail->tx_dpd;
1219 offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd);
1220 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1221 offset, sizeof (struct ex_dpd),
1222 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1223 prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
1224 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1225 offset, sizeof (struct ex_dpd),
1226 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1227 sc->tx_tail->tx_next = txp;
1228 sc->tx_tail = txp;
1229 } else {
1230 sc->tx_tail = sc->tx_head = txp;
1231 }
1232
1233 #if NBPFILTER > 0
1234 /*
1235 * Pass packet to bpf if there is a listener.
1236 */
1237 if (ifp->if_bpf)
1238 bpf_mtap(ifp->if_bpf, mb_head);
1239 #endif
1240 }
1241 out:
1242 if (sc->tx_head) {
1243 sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
1244 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
1245 ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
1246 sizeof (struct ex_dpd),
1247 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1248 ifp->if_flags |= IFF_OACTIVE;
1249 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
1250 bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
1251 DPD_DMADDR(sc, sc->tx_head));
1252
1253 /* trigger watchdog */
1254 ifp->if_timer = 5;
1255 }
1256 }
1257
1258
/*
 * Interrupt service routine.  Loops reading the status register,
 * acknowledging and dispatching each watched interrupt source until
 * nothing of interest remains.  Returns non-zero iff any interrupt
 * was handled (so shared-interrupt dispatch can tell it was ours).
 */
int
ex_intr(arg)
	void *arg;
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	u_int16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore interrupts while the interface is down or inactive. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(&sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		/*
		 * Stop when neither a watched source nor the interrupt
		 * latch is set.  A set latch with no watched source still
		 * falls through below so it gets acknowledged.
		 */
		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				printf("%s: intr latch cleared\n",
				    device_xname(&sc->sc_dev));
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		/* Fatal adapter error: full reset and reinitialize. */
		if (stat & HOST_ERROR) {
			aprint_error_dev(&sc->sc_dev, "adapter failure (%x)\n",
			    stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			/* Statistics counter near overflow; harvest them. */
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				printf("%s: Ignoring Dn interrupt (%x)\n",
				    device_xname(&sc->sc_dev), stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat() and should not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/*
			 * Walk the completed download list, unloading DMA
			 * maps and freeing the transmitted mbuf chains.
			 */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			/* Count successes (saturating) for error recovery. */
			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			u_int32_t pktstat;

 rcvloop:
			/* Examine the packet at the head of the rx chain. */
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					u_int16_t total_len;

					/* Drop errored packets. */
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Runts can't even hold a header. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * Set the incoming checksum information for the packet.
		 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				printf("%s: uplistptr was 0\n",
				    device_xname(&sc->sc_dev));
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
				   & 0x2000) {
				/* 0x2000 appears to be the "upload stalled"
				 * bit -- NOTE(review): confirm vs. databook. */
				printf("%s: receive stalled\n",
				    device_xname(&sc->sc_dev));
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		/* Feed interrupt status into the entropy pool. */
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}
1473
/*
 * Handle interface ioctls.  Media ioctls go to ifmedia; SIOCSIFFLAGS
 * takes a fast path that only reprograms the receive filter when
 * possible; everything else is passed to ether_ioctl(), with multicast
 * list changes applied to the hardware filter afterwards.
 */
int
ex_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	void *data;
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		/* If the interface is up and running, only modify the receive
		 * filter when setting promiscuous or debug mode.  Otherwise
		 * fall through to ether_ioctl, which will reset the chip.
		 */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			/* Only filter-relevant flags changed; no reset. */
			ex_set_mc(sc);
			error = 0;
			break;
#undef RESETIGN
		}
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	/* Remember flags so the SIOCSIFFLAGS fast path can compare. */
	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}
1529
/*
 * Harvest the chip's statistics counters into the interface counters
 * and read/clear the remaining registers so they do not raise
 * statistics-overflow interrupts.  The on-chip counters are small
 * (8/16 bit) and clear on read.
 */
void
ex_getstats(sc)
	struct ex_softc *sc;
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t upperok;

	GO_WINDOW(6);
	/*
	 * UPPER_FRAMES_OK packs the high bits of both frame counters:
	 * bits 0-1 extend RX_FRAMES_OK, bits 4-5 extend TX_FRAMES_OK.
	 */
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions,
	 * this is the number that occurred at the very least.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	/* Return to the normal operating window. */
	GO_WINDOW(1);
}
1574
1575 void
1576 ex_printstats(sc)
1577 struct ex_softc *sc;
1578 {
1579 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1580
1581 ex_getstats(sc);
1582 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
1583 "%llu\n", (unsigned long long)ifp->if_ipackets,
1584 (unsigned long long)ifp->if_opackets,
1585 (unsigned long long)ifp->if_ierrors,
1586 (unsigned long long)ifp->if_oerrors,
1587 (unsigned long long)ifp->if_ibytes,
1588 (unsigned long long)ifp->if_obytes);
1589 }
1590
1591 void
1592 ex_tick(arg)
1593 void *arg;
1594 {
1595 struct ex_softc *sc = arg;
1596 int s;
1597
1598 if (!device_is_active(&sc->sc_dev))
1599 return;
1600
1601 s = splnet();
1602
1603 if (sc->ex_conf & EX_CONF_MII)
1604 mii_tick(&sc->ex_mii);
1605
1606 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
1607 & COMMAND_IN_PROGRESS))
1608 ex_getstats(sc);
1609
1610 splx(s);
1611
1612 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
1613 }
1614
1615 void
1616 ex_reset(sc)
1617 struct ex_softc *sc;
1618 {
1619 u_int16_t val = GLOBAL_RESET;
1620
1621 if (sc->ex_conf & EX_CONF_RESETHACK)
1622 val |= 0x10;
1623 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
1624 /*
1625 * XXX apparently the command in progress bit can't be trusted
1626 * during a reset, so we just always wait this long. Fortunately
1627 * we normally only reset the chip during autoconfig.
1628 */
1629 delay(100000);
1630 ex_waitcmd(sc);
1631 }
1632
1633 void
1634 ex_watchdog(ifp)
1635 struct ifnet *ifp;
1636 {
1637 struct ex_softc *sc = ifp->if_softc;
1638
1639 log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
1640 ++sc->sc_ethercom.ec_if.if_oerrors;
1641
1642 ex_reset(sc);
1643 ex_init(ifp);
1644 }
1645
/*
 * Stop the interface: disable rx/tx, free all pending transmit mbufs,
 * reinitialize the descriptor lists, recycle the receive buffers, and
 * optionally power the card down.
 */
void
ex_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	/* Quiesce the chip before touching the descriptor rings. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	/* Release any mbufs still queued for transmission. */
	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	/* Rebuild the receive chain, recycling or replacing each mbuf. */
	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	/* Clear any latched interrupt. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}
1701
1702 static void
1703 ex_init_txdescs(sc)
1704 struct ex_softc *sc;
1705 {
1706 int i;
1707
1708 for (i = 0; i < EX_NDPD; i++) {
1709 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
1710 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
1711 if (i < EX_NDPD - 1)
1712 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
1713 else
1714 sc->sc_txdescs[i].tx_next = NULL;
1715 }
1716 sc->tx_free = &sc->sc_txdescs[0];
1717 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
1718 }
1719
1720
1721 int
1722 ex_activate(self, act)
1723 struct device *self;
1724 enum devact act;
1725 {
1726 struct ex_softc *sc = (void *) self;
1727 int s, error = 0;
1728
1729 s = splnet();
1730 switch (act) {
1731 case DVACT_ACTIVATE:
1732 error = EOPNOTSUPP;
1733 break;
1734
1735 case DVACT_DEACTIVATE:
1736 if (sc->ex_conf & EX_CONF_MII)
1737 mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
1738 MII_OFFSET_ANY);
1739 if_deactivate(&sc->sc_ethercom.ec_if);
1740 break;
1741 }
1742 splx(s);
1743
1744 return (error);
1745 }
1746
/*
 * Detach the driver instance: stop the tick callout, detach PHYs and
 * media, unhook from the network stack, then release all mbufs, DMA
 * maps and DMA memory in the reverse order of allocation.
 */
int
ex_detach(sc)
	struct ex_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Free any mbufs still held by the receive chain. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	/* Tear down per-descriptor DMA maps, then the shared DPD/UPD
	 * areas (unload, destroy, unmap, free — reverse of setup). */
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	shutdownhook_disestablish(sc->sc_sdhook);
	powerhook_disestablish(sc->sc_powerhook);

	return (0);
}
1804
1805 /*
1806 * Before reboots, reset card completely.
1807 */
1808 static void
1809 ex_shutdown(arg)
1810 void *arg;
1811 {
1812 struct ex_softc *sc = arg;
1813
1814 ex_stop(&sc->sc_ethercom.ec_if, 1);
1815 /*
1816 * Make sure the interface is powered up when we reboot,
1817 * otherwise firmware on some systems gets really confused.
1818 */
1819 (void) ex_enable(sc);
1820 }
1821
1822 /*
1823 * Read EEPROM data.
1824 * XXX what to do if EEPROM doesn't unbusy?
1825 */
1826 u_int16_t
1827 ex_read_eeprom(sc, offset)
1828 struct ex_softc *sc;
1829 int offset;
1830 {
1831 bus_space_tag_t iot = sc->sc_iot;
1832 bus_space_handle_t ioh = sc->sc_ioh;
1833 u_int16_t data = 0, cmd = READ_EEPROM;
1834 int off;
1835
1836 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
1837 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;
1838
1839 GO_WINDOW(0);
1840 if (ex_eeprom_busy(sc))
1841 goto out;
1842 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
1843 cmd | (off + (offset & 0x3f)));
1844 if (ex_eeprom_busy(sc))
1845 goto out;
1846 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
1847 out:
1848 return data;
1849 }
1850
1851 static int
1852 ex_eeprom_busy(sc)
1853 struct ex_softc *sc;
1854 {
1855 bus_space_tag_t iot = sc->sc_iot;
1856 bus_space_handle_t ioh = sc->sc_ioh;
1857 int i = 100;
1858
1859 while (i--) {
1860 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
1861 EEPROM_BUSY))
1862 return 0;
1863 delay(100);
1864 }
1865 printf("\n%s: eeprom stays busy.\n", device_xname(&sc->sc_dev));
1866 return (1);
1867 }
1868
1869 /*
1870 * Create a new rx buffer and add it to the 'soft' rx list.
1871 */
static int
ex_add_rxbuf(sc, rxd)
	struct ex_softc *sc;
	struct ex_rxdesc *rxd;
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	/*
	 * Try to allocate a fresh mbuf cluster; if either allocation
	 * fails, fall back to recycling the old buffer (rval = 1 tells
	 * the caller its packet data is being reused).  If there is no
	 * old buffer either, fail outright.
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		/* New cluster: replace the old mapping, if any. */
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(&sc->sc_dev, "can't load rx buffer, error = %d\n",
			    error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	/* upd_pktstatus initially holds the buffer length (minus pad). */
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		/* Link the previous tail's UPD to the new one and flush. */
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	/* Hand the buffer and its UPD to the hardware. */
	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}
1953
1954 u_int32_t
1955 ex_mii_bitbang_read(self)
1956 struct device *self;
1957 {
1958 struct ex_softc *sc = (void *) self;
1959
1960 /* We're already in Window 4. */
1961 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
1962 }
1963
1964 void
1965 ex_mii_bitbang_write(self, val)
1966 struct device *self;
1967 u_int32_t val;
1968 {
1969 struct ex_softc *sc = (void *) self;
1970
1971 /* We're already in Window 4. */
1972 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
1973 }
1974
1975 int
1976 ex_mii_readreg(v, phy, reg)
1977 struct device *v;
1978 int phy, reg;
1979 {
1980 struct ex_softc *sc = (struct ex_softc *)v;
1981 int val;
1982
1983 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
1984 return 0;
1985
1986 GO_WINDOW(4);
1987
1988 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);
1989
1990 GO_WINDOW(1);
1991
1992 return (val);
1993 }
1994
1995 void
1996 ex_mii_writereg(v, phy, reg, data)
1997 struct device *v;
1998 int phy;
1999 int reg;
2000 int data;
2001 {
2002 struct ex_softc *sc = (struct ex_softc *)v;
2003
2004 GO_WINDOW(4);
2005
2006 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);
2007
2008 GO_WINDOW(1);
2009 }
2010
2011 void
2012 ex_mii_statchg(v)
2013 struct device *v;
2014 {
2015 struct ex_softc *sc = (struct ex_softc *)v;
2016 bus_space_tag_t iot = sc->sc_iot;
2017 bus_space_handle_t ioh = sc->sc_ioh;
2018 int mctl;
2019
2020 GO_WINDOW(3);
2021 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
2022 if (sc->ex_mii.mii_media_active & IFM_FDX)
2023 mctl |= MAC_CONTROL_FDX;
2024 else
2025 mctl &= ~MAC_CONTROL_FDX;
2026 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
2027 GO_WINDOW(1); /* back to operating window */
2028 }
2029
2030 int
2031 ex_enable(sc)
2032 struct ex_softc *sc;
2033 {
2034 if (sc->enabled == 0 && sc->enable != NULL) {
2035 if ((*sc->enable)(sc) != 0) {
2036 aprint_error_dev(&sc->sc_dev, "de/vice enable failed\n");
2037 return (EIO);
2038 }
2039 sc->enabled = 1;
2040 }
2041 return (0);
2042 }
2043
2044 void
2045 ex_disable(sc)
2046 struct ex_softc *sc;
2047 {
2048 if (sc->enabled == 1 && sc->disable != NULL) {
2049 (*sc->disable)(sc);
2050 sc->enabled = 0;
2051 }
2052 }
2053
2054 void
2055 ex_power(why, arg)
2056 int why;
2057 void *arg;
2058 {
2059 struct ex_softc *sc = (void *)arg;
2060 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2061 int s;
2062
2063 s = splnet();
2064 switch (why) {
2065 case PWR_SUSPEND:
2066 case PWR_STANDBY:
2067 ex_stop(ifp, 0);
2068 if (sc->power != NULL)
2069 (*sc->power)(sc, why);
2070 break;
2071 case PWR_RESUME:
2072 if (ifp->if_flags & IFF_UP) {
2073 if (sc->power != NULL)
2074 (*sc->power)(sc, why);
2075 ex_init(ifp);
2076 }
2077 break;
2078 case PWR_SOFTSUSPEND:
2079 case PWR_SOFTSTANDBY:
2080 case PWR_SOFTRESUME:
2081 break;
2082 }
2083 splx(s);
2084 }
2085