/*	$NetBSD: smc83c170.c,v 1.13 1999/02/18 02:12:09 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/mii/miivar.h>
82
83 #include <dev/ic/smc83c170reg.h>
84 #include <dev/ic/smc83c170var.h>
85
86 void epic_start __P((struct ifnet *));
87 void epic_watchdog __P((struct ifnet *));
88 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
89
90 void epic_shutdown __P((void *));
91
92 void epic_reset __P((struct epic_softc *));
93 void epic_init __P((struct epic_softc *));
94 void epic_stop __P((struct epic_softc *));
95 int epic_add_rxbuf __P((struct epic_softc *, int));
96 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
97 void epic_set_mchash __P((struct epic_softc *));
98 void epic_fixup_clock_source __P((struct epic_softc *));
99 int epic_mii_read __P((struct device *, int, int));
100 void epic_mii_write __P((struct device *, int, int, int));
101 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
102 void epic_tick __P((void *));
103
104 void epic_statchg __P((struct device *));
105 int epic_mediachange __P((struct ifnet *));
106 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
107
108 /* XXX Should be somewhere else. */
109 #define ETHER_MIN_LEN 60
110
111 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
112 INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
113
114 /*
115 * Attach an EPIC interface to the system.
116 */
117 void
118 epic_attach(sc)
119 struct epic_softc *sc;
120 {
121 bus_space_tag_t st = sc->sc_st;
122 bus_space_handle_t sh = sc->sc_sh;
123 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
124 int i, rseg, error, attach_stage;
125 bus_dma_segment_t seg;
126 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
127 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
128
129 attach_stage = 0;
130
131 /*
132 * Allocate the control data structures, and create and load the
133 * DMA map for it.
134 */
135 if ((error = bus_dmamem_alloc(sc->sc_dmat,
136 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
137 BUS_DMA_NOWAIT)) != 0) {
138 printf("%s: unable to allocate control data, error = %d\n",
139 sc->sc_dev.dv_xname, error);
140 goto fail;
141 }
142
143 attach_stage = 1;
144
145 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
146 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
147 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
148 printf("%s: unable to map control data, error = %d\n",
149 sc->sc_dev.dv_xname, error);
150 goto fail;
151 }
152
153 attach_stage = 2;
154
155 if ((error = bus_dmamap_create(sc->sc_dmat,
156 sizeof(struct epic_control_data), 1,
157 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
158 &sc->sc_cddmamap)) != 0) {
159 printf("%s: unable to create control data DMA map, "
160 "error = %d\n", sc->sc_dev.dv_xname, error);
161 goto fail;
162 }
163
164 attach_stage = 3;
165
166 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
167 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
168 BUS_DMA_NOWAIT)) != 0) {
169 printf("%s: unable to load control data DMA map, error = %d\n",
170 sc->sc_dev.dv_xname, error);
171 goto fail;
172 }
173
174 attach_stage = 4;
175
176 /*
177 * Create the transmit buffer DMA maps.
178 */
179 for (i = 0; i < EPIC_NTXDESC; i++) {
180 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
181 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
182 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
183 printf("%s: unable to create tx DMA map %d, "
184 "error = %d\n", sc->sc_dev.dv_xname, i, error);
185 goto fail;
186 }
187 }
188
189 attach_stage = 5;
190
191 /*
192 * Create the recieve buffer DMA maps.
193 */
194 for (i = 0; i < EPIC_NRXDESC; i++) {
195 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
196 MCLBYTES, 0, BUS_DMA_NOWAIT,
197 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
198 printf("%s: unable to create rx DMA map %d, "
199 "error = %d\n", sc->sc_dev.dv_xname, i, error);
200 goto fail;
201 }
202 }
203
204 attach_stage = 6;
205
206 /*
207 * Pre-allocate the receive buffers.
208 */
209 for (i = 0; i < EPIC_NRXDESC; i++) {
210 if ((error = epic_add_rxbuf(sc, i)) != 0) {
211 printf("%s: unable to allocate or map rx buffer %d\n,"
212 " error = %d\n", sc->sc_dev.dv_xname, i, error);
213 goto fail;
214 }
215 }
216
217 attach_stage = 7;
218
219 /*
220 * Bring the chip out of low-power mode and reset it to a known state.
221 */
222 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
223 epic_reset(sc);
224
225 /*
226 * Read the Ethernet address from the EEPROM.
227 */
228 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
229 bcopy(myea, enaddr, sizeof(myea));
230
231 /*
232 * ...and the device name.
233 */
234 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
235 mydevname);
236 bcopy(mydevname, devname, sizeof(mydevname));
237 devname[sizeof(mydevname)] = '\0';
238 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
239 if (devname[i] == ' ')
240 devname[i] = '\0';
241 else
242 break;
243 }
244
245 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
246 devname, ether_sprintf(enaddr));
247
248 /*
249 * Initialize our media structures and probe the MII.
250 */
251 sc->sc_mii.mii_ifp = ifp;
252 sc->sc_mii.mii_readreg = epic_mii_read;
253 sc->sc_mii.mii_writereg = epic_mii_write;
254 sc->sc_mii.mii_statchg = epic_statchg;
255 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
256 epic_mediastatus);
257 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
258 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
259 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
260 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
261 } else
262 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
263
264 ifp = &sc->sc_ethercom.ec_if;
265 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
266 ifp->if_softc = sc;
267 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
268 ifp->if_ioctl = epic_ioctl;
269 ifp->if_start = epic_start;
270 ifp->if_watchdog = epic_watchdog;
271
272 /*
273 * Attach the interface.
274 */
275 if_attach(ifp);
276 ether_ifattach(ifp, enaddr);
277 #if NBPFILTER > 0
278 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
279 sizeof(struct ether_header));
280 #endif
281
282 /*
283 * Make sure the interface is shutdown during reboot.
284 */
285 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
286 if (sc->sc_sdhook == NULL)
287 printf("%s: WARNING: unable to establish shutdown hook\n",
288 sc->sc_dev.dv_xname);
289 return;
290
291 fail:
292 /*
293 * Free any resources we've allocated during the failed attach
294 * attempt. Do this in reverse order and fall through.
295 */
296 switch (attach_stage) {
297 case 7:
298 for (i = 0; i < EPIC_NRXDESC; i++) {
299 if (EPIC_DSRX(sc, i)->ds_mbuf != NULL) {
300 bus_dmamap_unload(sc->sc_dmat,
301 EPIC_DSRX(sc, i)->ds_dmamap);
302 m_freem(EPIC_DSRX(sc, i)->ds_mbuf);
303 }
304 }
305 /* FALLTHROUGH */
306
307 case 6:
308 for (i = 0; i < EPIC_NRXDESC; i++)
309 bus_dmamap_destroy(sc->sc_dmat,
310 EPIC_DSRX(sc, i)->ds_dmamap);
311 /* FALLTHROUGH */
312
313 case 5:
314 for (i = 0; i < EPIC_NTXDESC; i++)
315 bus_dmamap_destroy(sc->sc_dmat,
316 EPIC_DSTX(sc, i)->ds_dmamap);
317 /* FALLTHROUGH */
318
319 case 4:
320 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
321 /* FALLTHROUGH */
322
323 case 3:
324 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
325 /* FALLTHROUGH */
326
327 case 2:
328 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
329 sizeof(struct epic_control_data));
330 /* FALLTHROUGH */
331
332 case 1:
333 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
334 break;
335 }
336 }
337
338 /*
339 * Shutdown hook. Make sure the interface is stopped at reboot.
340 */
/*
 * Shutdown hook, registered via shutdownhook_establish() at attach
 * time.  Stops the interface so the chip is quiescent across reboot.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg);
}
349
350 /*
351 * Start packet transmission on the interface.
352 * [ifnet interface function]
353 */
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Dequeues packets from the interface send queue and maps them onto
 * transmit descriptors until either the queue is drained or all
 * descriptors are in use.  Descriptor ownership is handed to the chip
 * for every packet except the first; the first is released last (see
 * below) to avoid racing with a running transmitter.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  On any failure in the copy path the packet is
		 * put back on the send queue and we stop for now.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves.
		 * NOTE(review): the length is padded to ETHER_MIN_LEN
		 * but the fraglist only covers the real data — presumably
		 * the chip handles the difference; confirm against the
		 * EPIC/100 data sheet.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
524
525 /*
526 * Watchdog timer handler.
527 * [ifnet interface function]
528 */
529 void
530 epic_watchdog(ifp)
531 struct ifnet *ifp;
532 {
533 struct epic_softc *sc = ifp->if_softc;
534
535 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
536 ifp->if_oerrors++;
537
538 epic_init(sc);
539 }
540
541 /*
542 * Handle control requests from the operator.
543 * [ifnet interface function]
544 */
/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 *
 * Must be called from a context where splnet() is legal; all hardware
 * manipulation happens with the network interrupt level blocked.
 * Returns 0 on success or an errno value.
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: either adopt our link-level address into
			 * the XNS host number, or program the supplied
			 * host number as our station address.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
653
654 /*
655 * Interrupt handler.
656 */
/*
 * Interrupt handler.
 *
 * Services receive completion/queue-empty, transmit completion/underrun,
 * and fatal interrupts, looping until the chip no longer asserts
 * INTSTAT_INT_ACTV.  Returns non-zero if the interrupt was ours
 * (shared-interrupt "claimed" convention).
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
		/* Walk the ring from rxptr until we hit a chip-owned slot. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * Add a new buffer to the receive chain.  If this
			 * fails, the old buffer is recycled.
			 */
			m = ds->ds_mbuf;
			if (epic_add_rxbuf(sc, i) != 0) {
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/* Runt packets are dropped silently. */
			len = rxd->er_buflength;
			if (len < sizeof(struct ether_header)) {
				m_freem(m);
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.  In
			 * promiscuous mode, drop unicast frames not
			 * addressed to us after BPF has seen them.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
					 ETHER_ADDR_LEN) != 0 &&
				    (rxd->er_rxstatus &
				     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST)) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Remove the Ethernet header and pass it on. */
			m_adj(m, sizeof(struct ether_header));
			ether_input(ifp, eh, m);
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
		epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
874
875 /*
876 * One second timer, used to tick the MII.
877 */
878 void
879 epic_tick(arg)
880 void *arg;
881 {
882 struct epic_softc *sc = arg;
883 int s;
884
885 s = splnet();
886 mii_tick(&sc->sc_mii);
887 splx(s);
888
889 timeout(epic_tick, sc, hz);
890 }
891
892 /*
893 * Fixup the clock source on the EPIC.
894 */
895 void
896 epic_fixup_clock_source(sc)
897 struct epic_softc *sc;
898 {
899 int i;
900
901 /*
902 * According to SMC Application Note 7-15, the EPIC's clock
903 * source is incorrect following a reset. This manifests itself
904 * as failure to recognize when host software has written to
905 * a register on the EPIC. The appnote recommends issuing at
906 * least 16 consecutive writes to the CLOCK TEST bit to correctly
907 * configure the clock source.
908 */
909 for (i = 0; i < 16; i++)
910 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
911 TEST_CLOCKTEST);
912 }
913
914 /*
915 * Perform a soft reset on the EPIC.
916 */
917 void
918 epic_reset(sc)
919 struct epic_softc *sc;
920 {
921
922 epic_fixup_clock_source(sc);
923
924 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
925 delay(100);
926 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
927 delay(100);
928
929 epic_fixup_clock_source(sc);
930 }
931
932 /*
933 * Initialize the interface. Must be called at splnet().
934 */
/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Stops and resets the chip, programs the station address and receive
 * filter, rebuilds the transmit and receive descriptor rings, enables
 * interrupts, hands the rings to the chip, and starts the receiver,
 * the one-second MII tick, and (if possible) output.
 */
void
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	u_int32_t genctl, reg0;
	int i;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers each take one
	 * 16-bit little-endian chunk of the station address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.  The buffers are
	 * already allocated.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++)
		EPIC_INIT_RXDESC(sc, i);
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout(epic_tick, sc, hz);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);
}
1070
1071 /*
1072 * Stop transmission on the interface.
1073 */
/*
 * Stop transmission on the interface.
 *
 * Cancels the MII tick, masks and disables interrupts, halts the DMA
 * engines and the receiver, and releases all queued transmit mbufs.
 * Receive buffers stay loaded; epic_init() reuses them.
 */
void
epic_stop(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	untimeout(epic_tick, sc);

	/* Paranoia...  make sure the chip sees our register writes. */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1124
1125 /*
1126 * Read the EPIC Serial EEPROM.
1127 */
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the three-wire (Microwire-style) serial EEPROM protocol
 * through the EECTL register: for each of `wordcnt' words starting at
 * word offset `word', assert chip select, clock out the 3-bit READ
 * opcode and a 6-bit address (MSB first), then clock in 16 data bits
 * (MSB first) into `data'.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

/* Spin until the EEPROM reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, then pulse the clock. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out the 16 data bits, MSB first. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1204
1205 /*
1206 * Add a receive buffer to the indicated descriptor.
1207 */
1208 int
1209 epic_add_rxbuf(sc, idx)
1210 struct epic_softc *sc;
1211 int idx;
1212 {
1213 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1214 struct mbuf *m;
1215 int error;
1216
1217 MGETHDR(m, M_DONTWAIT, MT_DATA);
1218 if (m == NULL)
1219 return (ENOBUFS);
1220
1221 MCLGET(m, M_DONTWAIT);
1222 if ((m->m_flags & M_EXT) == 0) {
1223 m_freem(m);
1224 return (ENOBUFS);
1225 }
1226
1227 if (ds->ds_mbuf != NULL)
1228 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1229
1230 ds->ds_mbuf = m;
1231
1232 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1233 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1234 if (error) {
1235 printf("%s: can't load rx DMA map %d, error = %d\n",
1236 sc->sc_dev.dv_xname, idx, error);
1237 panic("epic_add_rxbuf"); /* XXX */
1238 }
1239
1240 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1241 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1242
1243 EPIC_INIT_RXDESC(sc, idx);
1244
1245 return (0);
1246 }
1247
1248 /*
1249 * Set the EPIC multicast hash table.
1250 *
1251 * NOTE: We rely on a recently-updated mii_media_active here!
1252 */
1253 void
1254 epic_set_mchash(sc)
1255 struct epic_softc *sc;
1256 {
1257 struct ethercom *ec = &sc->sc_ethercom;
1258 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1259 struct ether_multi *enm;
1260 struct ether_multistep step;
1261 u_int8_t *cp;
1262 u_int32_t crc, mchash[4];
1263 int len;
1264 static const u_int32_t crctab[] = {
1265 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1266 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1267 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1268 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1269 };
1270
1271 /*
1272 * Set up the multicast address filter by passing all multicast
1273 * addresses through a CRC generator, and then using the high-order
1274 * 6 bits as an index into the 64 bit multicast hash table (only
1275 * the lower 16 bits of each 32 bit multicast hash register are
1276 * valid). The high order bit selects the register, while the
1277 * rest of the bits select the bit within the register.
1278 */
1279
1280 if (ifp->if_flags & IFF_PROMISC)
1281 goto allmulti;
1282
1283 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1284 /* XXX hardware bug in 10Mbps mode. */
1285 goto allmulti;
1286 }
1287
1288 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1289
1290 ETHER_FIRST_MULTI(step, ec, enm);
1291 while (enm != NULL) {
1292 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1293 /*
1294 * We must listen to a range of multicast addresses.
1295 * For now, just accept all multicasts, rather than
1296 * trying to set only those filter bits needed to match
1297 * the range. (At this time, the only use of address
1298 * ranges is for IP multicast routing, for which the
1299 * range is big enough to require all bits set.)
1300 */
1301 goto allmulti;
1302 }
1303
1304 cp = enm->enm_addrlo;
1305 crc = 0xffffffff;
1306 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1307 crc ^= *cp++;
1308 crc = (crc >> 4) ^ crctab[crc & 0xf];
1309 crc = (crc >> 4) ^ crctab[crc & 0xf];
1310 }
1311 /* Just want the 6 most significant bits. */
1312 crc >>= 26;
1313
1314 /* Set the corresponding bit in the hash table. */
1315 mchash[crc >> 4] |= 1 << (crc & 0xf);
1316
1317 ETHER_NEXT_MULTI(step, enm);
1318 }
1319
1320 ifp->if_flags &= ~IFF_ALLMULTI;
1321 goto sethash;
1322
1323 allmulti:
1324 ifp->if_flags |= IFF_ALLMULTI;
1325 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1326
1327 sethash:
1328 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1329 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1330 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1331 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1332 }
1333
1334 /*
1335 * Wait for the MII to become ready.
1336 */
1337 int
1338 epic_mii_wait(sc, rw)
1339 struct epic_softc *sc;
1340 u_int32_t rw;
1341 {
1342 int i;
1343
1344 for (i = 0; i < 50; i++) {
1345 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1346 == 0)
1347 break;
1348 delay(2);
1349 }
1350 if (i == 50) {
1351 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1352 return (1);
1353 }
1354
1355 return (0);
1356 }
1357
1358 /*
1359 * Read from the MII.
1360 */
1361 int
1362 epic_mii_read(self, phy, reg)
1363 struct device *self;
1364 int phy, reg;
1365 {
1366 struct epic_softc *sc = (struct epic_softc *)self;
1367
1368 if (epic_mii_wait(sc, MMCTL_WRITE))
1369 return (0);
1370
1371 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1372 MMCTL_ARG(phy, reg, MMCTL_READ));
1373
1374 if (epic_mii_wait(sc, MMCTL_READ))
1375 return (0);
1376
1377 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1378 MMDATA_MASK);
1379 }
1380
1381 /*
1382 * Write to the MII.
1383 */
1384 void
1385 epic_mii_write(self, phy, reg, val)
1386 struct device *self;
1387 int phy, reg, val;
1388 {
1389 struct epic_softc *sc = (struct epic_softc *)self;
1390
1391 if (epic_mii_wait(sc, MMCTL_WRITE))
1392 return;
1393
1394 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1395 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1396 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1397 }
1398
1399 /*
1400 * Callback from PHY when media changes.
1401 */
1402 void
1403 epic_statchg(self)
1404 struct device *self;
1405 {
1406 struct epic_softc *sc = (struct epic_softc *)self;
1407 u_int32_t txcon;
1408
1409 /*
1410 * Update loopback bits in TXCON to reflect duplex mode.
1411 */
1412 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1413 if (sc->sc_mii.mii_media_active & IFM_FDX)
1414 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1415 else
1416 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1417 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1418
1419 /*
1420 * There is a multicast filter bug in 10Mbps mode. Kick the
1421 * multicast filter in case the speed changed.
1422 */
1423 epic_set_mchash(sc);
1424
1425 /* XXX Update ifp->if_baudrate */
1426 }
1427
1428 /*
1429 * Callback from ifmedia to request current media status.
1430 */
1431 void
1432 epic_mediastatus(ifp, ifmr)
1433 struct ifnet *ifp;
1434 struct ifmediareq *ifmr;
1435 {
1436 struct epic_softc *sc = ifp->if_softc;
1437
1438 mii_pollstat(&sc->sc_mii);
1439 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1440 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1441 }
1442
1443 /*
1444 * Callback from ifmedia to request new media setting.
1445 */
1446 int
1447 epic_mediachange(ifp)
1448 struct ifnet *ifp;
1449 {
1450 struct epic_softc *sc = ifp->if_softc;
1451
1452 if (ifp->if_flags & IFF_UP)
1453 mii_mediachg(&sc->sc_mii);
1454 return (0);
1455 }
1456