/*	$NetBSD: elinkxl.c,v 1.105 2008/04/28 20:23:49 martin Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.105 2008/04/28 20:23:49 martin Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>
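
/*
 * The EtherLink XL register file is windowed: GO_WINDOW() (from
 * elink3reg.h) selects which register window subsequent bus_space
 * accesses hit.  Window 1 is the normal operating window.
 */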

#ifdef DEBUG
int exdebug = 0;
#endif

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_printstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(device_t);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	callout_init(&sc->ex_mii_callout, 0);

	ex_reset(sc);

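	/*
	 * The station address is stored in three 16-bit EEPROM words,
	 * most significant byte first.
	 */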
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once.  The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);

	GO_WINDOW(1);

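	/*
	 * Initial early-transmit threshold; ex_txstat() raises it when
	 * transmit underruns occur.
	 */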
	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	if (!pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	else
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */

	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		/* FALLTHROUGH: release the upload resources, too. */

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define PRINT(str)	do {						\
		aprint_normal("%s%s", sep, str);			\
		sep = ", ";						\
	} while (/*CONSTCOND*/ 0)

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask.  The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

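/*
 * The 90xB keeps a 256-bit multicast hash filter; the hash index is
 * the low 8 bits of the big-endian CRC-32 of the multicast address.
 */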
#define MCHASHSIZE 256
#define ex_mchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
			    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter.  Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}

/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is the upper byte of the 16-bit register it
	 * shares with ELINK_TIMER, hence the 16-bit accesses below.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		aprint_error(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3.
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
		    << (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}

/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * Remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs;
		    segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAGS (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/*
		 * Byte-swap constants so the compiler can optimize:
		 * htole32() of a constant folds at compile time, so the
		 * flags can be OR'd into dpd_fsh without a run-time swap.
		 */
		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}

int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat() and we should
			 * not handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* Reset watchdog timer; it was set in ex_start(). */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
					/*
					 * Set the incoming checksum
					 * information for the packet.
					 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |=
						    M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |=
							    M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA
			 * engine stalled.  We could be more subtle about
			 * this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* No more interrupts. */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		/*
		 * If the interface is up and running, only modify the
		 * receive filter when setting promiscuous or debug mode.
		 * Otherwise fall through to ether_ioctl, which will reset
		 * the chip.
		 */
#define RESETIGN (IFF_CANTCHANGE|IFF_DEBUG)
		if (((ifp->if_flags & (IFF_UP|IFF_RUNNING))
		    == (IFF_UP|IFF_RUNNING))
		    && ((ifp->if_flags & (~RESETIGN))
		    == (sc->sc_if_flags & (~RESETIGN)))) {
			ex_set_mc(sc);
			error = 0;
			break;
		}
#undef RESETIGN
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts.
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_printstats(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long.  Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD - 1];
}

int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		if (sc->ex_conf & EX_CONF_MII)
			mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
			    MII_OFFSET_ANY);
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return (error);
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs. */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void)ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after the 14-byte Ethernet header, so the IP
	 * header that follows it is longword-aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg)
{
	struct ex_softc *sc = device_private(v);
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(device_t v, int phy, int reg, int data)
{
	struct ex_softc *sc = device_private(v);

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(device_t v)
{
	struct ex_softc *sc = device_private(v);
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}