/*	$NetBSD: elinkxl.c,v 1.107 2008/12/03 15:36:11 tsutsui Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.107 2008/12/03 15:36:11 tsutsui Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif
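
/*
 * A note on register access: the chip's I/O space is banked into
 * "windows".  The GO_WINDOW() macro used throughout (see elink3reg.h)
 * issues a window-select command so that subsequent bus_space accesses
 * hit the intended register bank; window 1 is the normal operating
 * window.
 */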

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

static int ex_ifflags_cb(struct ethercom *);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_printstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(device_t);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
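/*
 * The generic mii_bitbang code clocks MII frames one bit at a time
 * through the ops table below.  ex_mii_readreg()/ex_mii_writereg()
 * switch to window 4 first, where the ELINK_W4_PHYSMGMT register
 * exposes the MDIO pins; note that ELINK_PHY_DATA serves as both the
 * MDO and MDI bit, since the data line is shared and its direction is
 * selected by ELINK_PHY_DIR.
 */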
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	callout_init(&sc->ex_mii_callout, 0);

	ex_reset(sc);

	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

	attach_stage = 0;

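	/*
	 * Terminology note: 3Com calls card-to-host (receive) DMA
	 * "upload" and host-to-card (transmit) DMA "download"; hence
	 * UPDs are the receive descriptors and DPDs the transmit
	 * descriptors.
	 */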
	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

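	/*
	 * Note that EX_IP4CSUMTX_PADLEN extra bytes are allocated past
	 * DPDMEM_SIZE: they are zeroed below and pre-synced once, and
	 * ex_start() points a trailing fragment at them to pad very
	 * short packets as a workaround for the ip4csum-tx hardware bug.
	 */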
	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n",
		    error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n",
		    error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);

	attach_stage = 8;


	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once.  The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;


	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	ether_ifattach(ifp, macaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	if (!pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
	else
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */
	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		break;

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}

}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask.  The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

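/*
 * The 90xB multicast filter is a 256-bit hash: ex_mchash() takes the
 * big-endian CRC-32 of the 6-byte Ethernet address and uses its low
 * 8 bits as the bit index programmed with ELINK_SETHASHFILBIT below.
 */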
#define MCHASHSIZE	256
#define ex_mchash(addr)	(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
			    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter.  Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}


/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while; we have more to do than just wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3.
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
		    << (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}


/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs;
		    segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAGS (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/*
		 * Byte-swap the checksum flag constants rather than
		 * dpd_fsh itself: they are compile-time constants, so
		 * the compiler can fold the htole32() calls away.
		 */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, mb_head);
#endif
	}
 out:
	if (sc->tx_head) {
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both the Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat(), so we must not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m);
#endif
					/*
					 * Set the incoming checksum
					 * information for the packet.
					 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA
			 * engine stalled.  We could be more subtle about
			 * this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_printstats(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long.  Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}


int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);
	int s, error = 0;

	s = splnet();
	switch (act) {
	case DVACT_ACTIVATE:
		error = EOPNOTSUPP;
		break;

	case DVACT_DEACTIVATE:
		if (sc->ex_conf & EX_CONF_MII)
			mii_activate(&sc->ex_mii, act, MII_PHY_ANY,
			    MII_OFFSET_ANY);
		if_deactivate(&sc->sc_ethercom.ec_if);
		break;
	}
	splx(s);

	return (error);
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	/* Unhook our tick handler. */
	callout_stop(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

#if NRND > 0
	rnd_detach_source(&sc->rnd_source);
#endif
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Offset the data pointer by 2 bytes so that the payload after
	 * the 14-byte Ethernet header leaves the IP header 32-bit aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg)
{
	struct ex_softc *sc = device_private(v);
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(device_t v, int phy, int reg, int data)
{
	struct ex_softc *sc = device_private(v);

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(device_t v)
{
	struct ex_softc *sc = device_private(v);
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);	/* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}
