/*	$NetBSD: elinkxl.c,v 1.110.4.2 2011/03/05 20:53:15 rmind Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.110.4.2 2011/03/05 20:53:15 rmind Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif
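
/*
 * Register-window note: the 3c90x exposes most of its registers through
 * numbered windows; the GO_WINDOW() macro (from elink3reg.h) issues a
 * WINDOW_SELECT command to switch windows before an access.
 */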

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

static int ex_ifflags_cb(struct ethercom *);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_printstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(device_t);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual);

	callout_init(&sc->ex_mii_callout, 0);

	ex_reset(sc);

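	/*
	 * Read the station address from the EEPROM: three 16-bit words,
	 * stored most-significant byte first.
	 */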
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

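	/*
	 * attach_stage records how far we have progressed, so that the
	 * fail: path below can unwind exactly the resources allocated.
	 */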
	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once.  The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
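		/*
		 * One fragment per UPD: a whole mbuf cluster minus the
		 * 2-byte pad that ex_add_rxbuf() reserves to long-word
		 * align the IP header, marked as the last fragment.
		 */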
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */

	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		/* FALLTHROUGH: the upload resources must be released too */

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity-check that at least one medium is present! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask.  The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

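	/*
	 * Disable the FIFO early-receive threshold; early receives are
	 * signalled through the upload (bus-master) DMA engine instead
	 * (ELINK_DMAC_UPRXEAREN below).
	 */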
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

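/*
 * The 90xB maintains a 256-bit multicast hash filter: a group address
 * selects a filter bit via the low 8 bits of the big-endian CRC-32 of
 * the address.
 */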
#define	MCHASHSIZE	256
#define	ex_mchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
			    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter.  Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}

/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
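	/*
	 * Note the precedence below: a later test overrides an earlier
	 * one, so 100baseFX wins over 100baseTX, which wins over MII/T4.
	 */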
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
		    << (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}

/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

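	/*
	 * Only build a new download chain when the previous one has been
	 * reclaimed (tx_head == NULL) and free descriptors exist; otherwise
	 * the DN_COMPLETE handler in ex_intr() will restart us.
	 */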
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAG (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap(ifp, mb_head);
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}

int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both the Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat(), so we must not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					bpf_mtap(ifp, m);
					/*
					 * Set the incoming checksum
					 * information for the packet.
					 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |=
						    M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |=
							    M_CSUM_IPv4_BAD;
						if (pktstat &
						    EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_TCPv4;
							if (pktstat &
							    EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat &
						    EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_UDPv4;
							if (pktstat &
							    EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA
			 * engine stalled.  We could be more subtle about
			 * this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh,
			    ELINK_UPPKTSTATUS) & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	GO_WINDOW(6);
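	/*
	 * UPPER_FRAMES_OK holds the high bits of the 10-bit frame counters:
	 * rx bits 9:8 in register bits 1:0, tx bits 9:8 in bits 5:4.
	 */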
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_printstats(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long.  Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

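	/*
	 * Chain every DPD and its DMA map into the software free list;
	 * tx_free points at the head, tx_ftail at the last element.
	 */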
	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}

int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i, s;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	s = splnet();
	/* Stop the interface.  Callouts are stopped in it. */
	ex_stop(ifp, 1);
	splx(s);

	/* Destroy our callout. */
	callout_destroy(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

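	/* Poll the busy bit for up to 100 * 100us = 10ms. */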
	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg)
{
	struct ex_softc *sc = device_private(v);
	int val;

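	/*
	 * Cards with an internal PHY (EX_CONF_INTPHY) only answer at the
	 * fixed address ELINK_INTPHY_ID; ignore probes elsewhere.
	 */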
	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(device_t v, int phy, int reg, int data)
{
	struct ex_softc *sc = device_private(v);

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(device_t v)
{
	struct ex_softc *sc = device_private(v);
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}