/*	$NetBSD: elinkxl.c,v 1.110 2010/01/19 22:06:24 pooka Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.110 2010/01/19 22:06:24 pooka Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

static int ex_ifflags_cb(struct ethercom *);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_printstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(device_t);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips. Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
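
/*
 * The generic mii_bitbang_readreg()/mii_bitbang_writereg() helpers use
 * these ops to clock the MII serial protocol: each bit is moved by
 * toggling ELINK_PHY_CLK/ELINK_PHY_DATA/ELINK_PHY_DIR in the window 4
 * PHYSMGMT register through ex_mii_bitbang_read() and
 * ex_mii_bitbang_write() below.
 */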

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	callout_init(&sc->ex_mii_callout, 0);

	ex_reset(sc);

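	/*
	 * Read the station address from the EEPROM.  Each 16-bit word
	 * holds two bytes of the address, most significant byte in the
	 * upper half.
	 */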
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n", ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);
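	/*
	 * DPDMEMPAD_OFF points at a zeroed pad area placed after the
	 * DPDs; ex_start() appends it to short IPv4-checksummed frames
	 * to dodge the ip4csum-tx hardware bug, so sync it for the
	 * device once here.
	 */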

	attach_stage = 8;


	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once. The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;


	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info. We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */
	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		break;

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

#define	MCHASHSIZE	256
#define	ex_mchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
			    (MCHASHSIZE - 1))
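
/*
 * The hash index is the low 8 bits of the big-endian CRC-32 of the
 * station address, selecting one of the MCHASHSIZE filter bits;
 * ex_set_mc() programs each index with the ELINK_SETHASHFILBIT command.
 */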

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}


/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is the upper byte of the 16-bit word that also
	 * holds ELINK_TIMER, so both are fetched in one read below.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
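	/*
	 * The transceiver-select field sits in the upper half of the
	 * 32-bit internal config register, hence the extra 16-bit
	 * offset applied to CONFIG_XCVR_SEL and CONFIG_XCVR_SEL_SHIFT.
	 */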
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
		    << (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}



/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

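	/*
	 * Nothing to do if a download is already in progress (tx_head
	 * non-NULL) or if there are no free DPDs.
	 */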
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAGS (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
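			/*
			 * The loop above left fr one past the final
			 * fragment; back up to tag it as the last.
			 */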
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, mb_head);
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set. However, the packet
			 * has been reloaded in ex_txstat() and we should
			 * not handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					if (ifp->if_bpf)
						bpf_ops->bpf_mtap(
						    ifp->if_bpf, m);
					/*
					 * Set the incoming checksum
					 * information for the packet.
					 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |=
						    M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |=
							    M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

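/*
 * if_flags change callback: a PROMISC toggle only requires the receive
 * filter to be reprogrammed; any other significant flag change makes
 * the caller reset the interface (ENETRESET).
 */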
static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	GO_WINDOW(6);
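	/*
	 * Window 6 holds the statistics registers; the reads below rely
	 * on each counter being latched and cleared when it is read.
	 */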
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here. Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_printstats(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long. Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

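/*
 * Chain all DPDs into the software free list; tx_free points at the
 * first descriptor and tx_ftail at the last.
 */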
static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}


int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Offset the data pointer by two bytes so the payload that
	 * follows the 14-byte Ethernet header is 32-bit aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg)
{
	struct ex_softc *sc = device_private(v);
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(device_t v, int phy, int reg, int data)
{
	struct ex_softc *sc = device_private(v);

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(device_t v)
{
	struct ex_softc *sc = device_private(v);
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}