1 /* $NetBSD: smc83c170.c,v 1.9 1998/10/05 19:10:22 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/mii/miivar.h>
82
83 #include <dev/ic/smc83c170reg.h>
84 #include <dev/ic/smc83c170var.h>
85
86 void epic_start __P((struct ifnet *));
87 void epic_watchdog __P((struct ifnet *));
88 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
89
90 void epic_shutdown __P((void *));
91
92 void epic_reset __P((struct epic_softc *));
93 void epic_init __P((struct epic_softc *));
94 void epic_stop __P((struct epic_softc *));
95 int epic_add_rxbuf __P((struct epic_softc *, int));
96 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
97 void epic_set_mchash __P((struct epic_softc *));
98 void epic_fixup_clock_source __P((struct epic_softc *));
99 int epic_mii_read __P((struct device *, int, int));
100 void epic_mii_write __P((struct device *, int, int, int));
101 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
102 void epic_tick __P((void *));
103
104 void epic_statchg __P((struct device *));
105 int epic_mediachange __P((struct ifnet *));
106 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
107
108 /*
109 * Fudge the incoming packets by this much, to ensure the data after
110 * the Ethernet header is aligned.
111 */
112 #define RX_ALIGNMENT_FUDGE 2
113
114 /* XXX Should be somewhere else. */
115 #define ETHER_MIN_LEN 60
116
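/*
 * Interrupt sources serviced by epic_intr(): fatal chip errors,
 * transmit completion and underrun, receive completion, and
 * receive queue empty.
 */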
117 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
118 INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
119
120 /*
121 * Attach an EPIC interface to the system.
122 */
123 void
124 epic_attach(sc)
125 struct epic_softc *sc;
126 {
127 bus_space_tag_t st = sc->sc_st;
128 bus_space_handle_t sh = sc->sc_sh;
129 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
130 int i, rseg, error, attach_stage;
131 bus_dma_segment_t seg;
132 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
133 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
134
135 attach_stage = 0;
136
137 /*
138 * Allocate the control data structures, and create and load the
139 * DMA map for it.
140 */
141 if ((error = bus_dmamem_alloc(sc->sc_dmat,
142 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
143 BUS_DMA_NOWAIT)) != 0) {
144 printf("%s: unable to allocate control data, error = %d\n",
145 sc->sc_dev.dv_xname, error);
146 goto fail;
147 }
148
149 attach_stage = 1;
150
151 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
152 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
153 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
154 printf("%s: unable to map control data, error = %d\n",
155 sc->sc_dev.dv_xname, error);
156 goto fail;
157 }
158
159 attach_stage = 2;
160
161 if ((error = bus_dmamap_create(sc->sc_dmat,
162 sizeof(struct epic_control_data), 1,
163 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
164 &sc->sc_cddmamap)) != 0) {
165 printf("%s: unable to create control data DMA map, "
166 "error = %d\n", sc->sc_dev.dv_xname, error);
167 goto fail;
168 }
169
170 attach_stage = 3;
171
172 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
173 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
174 BUS_DMA_NOWAIT)) != 0) {
175 printf("%s: unable to load control data DMA map, error = %d\n",
176 sc->sc_dev.dv_xname, error);
177 goto fail;
178 }
179
180 attach_stage = 4;
181
182 /*
183 * Create the transmit buffer DMA maps.
184 */
185 for (i = 0; i < EPIC_NTXDESC; i++) {
186 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
187 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
188 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
189 printf("%s: unable to create tx DMA map %d, "
190 "error = %d\n", sc->sc_dev.dv_xname, i, error);
191 goto fail;
192 }
193 }
194
195 attach_stage = 5;
196
197 /*
 198 	 * Create the receive buffer DMA maps.
199 */
200 for (i = 0; i < EPIC_NRXDESC; i++) {
201 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
202 MCLBYTES, 0, BUS_DMA_NOWAIT,
203 &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
204 printf("%s: unable to create rx DMA map %d, "
205 "error = %d\n", sc->sc_dev.dv_xname, i, error);
206 goto fail;
207 }
208 }
209
210 attach_stage = 6;
211
212 /*
213 * Pre-allocate the receive buffers.
214 */
215 for (i = 0; i < EPIC_NRXDESC; i++) {
216 if ((error = epic_add_rxbuf(sc, i)) != 0) {
217 printf("%s: unable to allocate or map rx buffer %d\n,"
218 " error = %d\n", sc->sc_dev.dv_xname, i, error);
219 goto fail;
220 }
221 }
222
223 attach_stage = 7;
224
225 /*
226 * Bring the chip out of low-power mode and reset it to a known state.
227 */
228 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
229 epic_reset(sc);
230
231 /*
232 * Read the Ethernet address from the EEPROM.
233 */
234 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
235 bcopy(myea, enaddr, sizeof(myea));
236
237 /*
238 * ...and the device name.
239 */
240 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
241 mydevname);
242 bcopy(mydevname, devname, sizeof(mydevname));
243 devname[sizeof(mydevname)] = '\0';
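	/* Trim trailing blanks from the name read out of the EEPROM. */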
244 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
245 if (devname[i] == ' ')
246 devname[i] = '\0';
247 else
248 break;
249 }
250
251 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
252 devname, ether_sprintf(enaddr));
253
254 /*
255 * Initialize our media structures and probe the MII.
256 */
257 sc->sc_mii.mii_ifp = ifp;
258 sc->sc_mii.mii_readreg = epic_mii_read;
259 sc->sc_mii.mii_writereg = epic_mii_write;
260 sc->sc_mii.mii_statchg = epic_statchg;
261 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
262 epic_mediastatus);
263 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
264 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
265 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
266 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
267 } else
268 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
269
270 ifp = &sc->sc_ethercom.ec_if;
271 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
272 ifp->if_softc = sc;
273 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
274 ifp->if_ioctl = epic_ioctl;
275 ifp->if_start = epic_start;
276 ifp->if_watchdog = epic_watchdog;
277
278 /*
279 * Attach the interface.
280 */
281 if_attach(ifp);
282 ether_ifattach(ifp, enaddr);
283 #if NBPFILTER > 0
284 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
285 sizeof(struct ether_header));
286 #endif
287
288 /*
289 * Make sure the interface is shutdown during reboot.
290 */
291 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
292 if (sc->sc_sdhook == NULL)
293 printf("%s: WARNING: unable to establish shutdown hook\n",
294 sc->sc_dev.dv_xname);
295 return;
296
297 fail:
298 /*
299 * Free any resources we've allocated during the failed attach
300 * attempt. Do this in reverse order and fall through.
301 */
302 switch (attach_stage) {
303 case 7:
304 for (i = 0; i < EPIC_NRXDESC; i++) {
305 if (sc->sc_rxsoft[i].ds_mbuf != NULL) {
306 bus_dmamap_unload(sc->sc_dmat,
307 sc->sc_rxsoft[i].ds_dmamap);
308 m_freem(sc->sc_rxsoft[i].ds_mbuf);
309 }
310 }
311 /* FALLTHROUGH */
312
313 case 6:
314 for (i = 0; i < EPIC_NRXDESC; i++)
315 bus_dmamap_destroy(sc->sc_dmat,
316 sc->sc_rxsoft[i].ds_dmamap);
317 /* FALLTHROUGH */
318
319 case 5:
320 for (i = 0; i < EPIC_NTXDESC; i++)
321 bus_dmamap_destroy(sc->sc_dmat,
322 sc->sc_txsoft[i].ds_dmamap);
323 /* FALLTHROUGH */
324
325 case 4:
326 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
327 /* FALLTHROUGH */
328
329 case 3:
330 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
331 /* FALLTHROUGH */
332
333 case 2:
334 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
335 sizeof(struct epic_control_data));
336 /* FALLTHROUGH */
337
338 case 1:
339 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
340 break;
341 }
342 }
343
344 /*
345 * Shutdown hook. Make sure the interface is stopped at reboot.
346 */
347 void
348 epic_shutdown(arg)
349 void *arg;
350 {
351 struct epic_softc *sc = arg;
352
353 epic_stop(sc);
354 }
355
356 /*
357 * Start packet transmission on the interface.
358 * [ifnet interface function]
359 */
360 void
361 epic_start(ifp)
362 struct ifnet *ifp;
363 {
364 struct epic_softc *sc = ifp->if_softc;
365 struct epic_txdesc *txd;
366 struct epic_descsoft *ds;
367 struct epic_fraglist *fr;
368 bus_dmamap_t dmamap;
369 struct mbuf *m0;
370 int nexttx, seg, error, txqueued;
371
372 txqueued = 0;
373
374 /*
375 * Loop through the send queue, setting up transmit descriptors
376 * until we drain the queue, or use up all available transmit
377 * descriptors.
378 */
379 while (ifp->if_snd.ifq_head != NULL &&
380 sc->sc_txpending < EPIC_NTXDESC) {
381 /*
382 * Grab a packet off the queue.
383 */
384 IF_DEQUEUE(&ifp->if_snd, m0);
385
386 /*
387 * Get the last and next available transmit descriptor.
388 */
389 nexttx = EPIC_NEXTTX(sc->sc_txlast);
390 txd = &sc->sc_control_data->ecd_txdescs[nexttx];
391 fr = &sc->sc_control_data->ecd_txfrags[nexttx];
392 ds = &sc->sc_txsoft[nexttx];
393 dmamap = ds->ds_dmamap;
394
395 loadmap:
396 /*
397 * Load the DMA map with the packet.
398 */
399 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
400 BUS_DMA_NOWAIT);
401 switch (error) {
402 case 0:
403 /* Success. */
404 break;
405
406 case EFBIG:
407 {
408 struct mbuf *mn;
409
410 /*
411 * We ran out of segments. We have to recopy this
412 * mbuf chain first. Bail out if we can't get the
413 * new buffers.
414 */
415 printf("%s: too many segments, ", sc->sc_dev.dv_xname);
416
417 MGETHDR(mn, M_DONTWAIT, MT_DATA);
418 if (mn == NULL) {
419 m_freem(m0);
420 printf("aborting\n");
421 goto out;
422 }
423 if (m0->m_pkthdr.len > MHLEN) {
424 MCLGET(mn, M_DONTWAIT);
425 if ((mn->m_flags & M_EXT) == 0) {
426 m_freem(mn);
427 m_freem(m0);
428 printf("aborting\n");
429 goto out;
430 }
431 }
432 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mn, caddr_t));
433 mn->m_pkthdr.len = mn->m_len = m0->m_pkthdr.len;
434 m_freem(m0);
435 m0 = mn;
436 printf("retrying\n");
437 goto loadmap;
438 }
439
440 default:
441 /*
442 * Some other problem; report it.
443 */
444 printf("%s: can't load mbuf chain, error = %d\n",
445 sc->sc_dev.dv_xname, error);
446 m_freem(m0);
447 goto out;
448 }
449
450 /*
451 * Initialize the fraglist.
452 */
453 fr->ef_nfrags = dmamap->dm_nsegs;
454 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
455 fr->ef_frags[seg].ef_addr =
456 dmamap->dm_segs[seg].ds_addr;
457 fr->ef_frags[seg].ef_length =
458 dmamap->dm_segs[seg].ds_len;
459 }
460
461 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
462 BUS_DMASYNC_PREWRITE);
463
464 /*
465 * Store a pointer to the packet so we can free it later.
466 */
467 ds->ds_mbuf = m0;
468
469 /*
470 * Finish setting up the new transmit descriptor: set the
471 * packet length and give it to the EPIC.
472 */
473 txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
474 txd->et_txstatus = ET_TXSTAT_OWNER;
475
476 /*
477 * Committed; advance the lasttx pointer. If nothing was
478 * previously queued, reset the dirty pointer.
479 */
480 sc->sc_txlast = nexttx;
481 if (sc->sc_txpending == 0)
482 sc->sc_txdirty = nexttx;
483
484 sc->sc_txpending++;
485
486 txqueued = 1;
487
488 #if NBPFILTER > 0
489 /*
490 * Pass the packet to any BPF listeners.
491 */
492 if (ifp->if_bpf)
493 bpf_mtap(ifp->if_bpf, m0);
494 #endif
495 }
496
497 out:
498 /*
499 * We're finished. If we added more packets, make sure the
500 * transmit DMA engine is running.
501 */
502 if (txqueued) {
503 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
504 COMMAND_TXQUEUED);
505
506 /*
507 * Set a 5 second watchdog timer.
508 */
509 ifp->if_timer = 5;
510 }
511 }
512
513 /*
514 * Watchdog timer handler.
515 * [ifnet interface function]
516 */
517 void
518 epic_watchdog(ifp)
519 struct ifnet *ifp;
520 {
521 struct epic_softc *sc = ifp->if_softc;
522
523 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
524 ifp->if_oerrors++;
525
526 epic_init(sc);
527 }
528
529 /*
530 * Handle control requests from the operator.
531 * [ifnet interface function]
532 */
533 int
534 epic_ioctl(ifp, cmd, data)
535 struct ifnet *ifp;
536 u_long cmd;
537 caddr_t data;
538 {
539 struct epic_softc *sc = ifp->if_softc;
540 struct ifreq *ifr = (struct ifreq *)data;
541 struct ifaddr *ifa = (struct ifaddr *)data;
542 int s, error = 0;
543
544 s = splnet();
545
546 switch (cmd) {
547 case SIOCSIFADDR:
548 ifp->if_flags |= IFF_UP;
549
550 switch (ifa->ifa_addr->sa_family) {
551 #ifdef INET
552 case AF_INET:
553 epic_init(sc);
554 arp_ifinit(ifp, ifa);
555 break;
556 #endif /* INET */
557 #ifdef NS
558 case AF_NS:
559 {
560 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
561
562 if (ns_nullhost(*ina))
563 ina->x_host = *(union ns_host *)
564 LLADDR(ifp->if_sadl);
565 else
566 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
567 ifp->if_addrlen);
568 /* Set new address. */
569 epic_init(sc);
570 break;
571 }
572 #endif /* NS */
573 default:
574 epic_init(sc);
575 break;
576 }
577 break;
578
579 case SIOCSIFMTU:
580 if (ifr->ifr_mtu > ETHERMTU)
581 error = EINVAL;
582 else
583 ifp->if_mtu = ifr->ifr_mtu;
584 break;
585
586 case SIOCSIFFLAGS:
587 if ((ifp->if_flags & IFF_UP) == 0 &&
588 (ifp->if_flags & IFF_RUNNING) != 0) {
589 /*
590 * If interface is marked down and it is running, then
591 * stop it.
592 */
593 epic_stop(sc);
594 ifp->if_flags &= ~IFF_RUNNING;
595 } else if ((ifp->if_flags & IFF_UP) != 0 &&
596 (ifp->if_flags & IFF_RUNNING) == 0) {
597 /*
 598 			 * If interface is marked up and it is stopped, then
599 * start it.
600 */
601 epic_init(sc);
602 } else {
603 /*
604 * Reset the interface to pick up changes in any other
605 * flags that affect the hardware state.
606 */
607 epic_init(sc);
608 }
609 break;
610
611 case SIOCADDMULTI:
612 case SIOCDELMULTI:
613 error = (cmd == SIOCADDMULTI) ?
614 ether_addmulti(ifr, &sc->sc_ethercom) :
615 ether_delmulti(ifr, &sc->sc_ethercom);
616
617 if (error == ENETRESET) {
618 /*
619 * Multicast list has changed; set the hardware filter
620 * accordingly.
621 */
622 epic_init(sc);
623 error = 0;
624 }
625 break;
626
627 case SIOCSIFMEDIA:
628 case SIOCGIFMEDIA:
629 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
630 break;
631
632 default:
633 error = EINVAL;
634 break;
635 }
636
637 splx(s);
638 return (error);
639 }
640
641 /*
642 * Interrupt handler.
643 */
644 int
645 epic_intr(arg)
646 void *arg;
647 {
648 struct epic_softc *sc = arg;
649 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
650 struct ether_header *eh;
651 struct epic_rxdesc *rxd;
652 struct epic_txdesc *txd;
653 struct epic_descsoft *ds;
654 struct mbuf *m;
655 u_int32_t intstat;
656 int i, len, claimed = 0, error;
657
658 top:
659 /*
660 * Get the interrupt status from the EPIC.
661 */
662 intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
663 if ((intstat & INTSTAT_INT_ACTV) == 0)
664 return (claimed);
665
666 claimed = 1;
667
668 /*
669 * Acknowledge the interrupt.
670 */
671 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
672 intstat & INTMASK);
673
674 /*
675 * Check for receive interrupts.
676 */
677 if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
678 for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
679 rxd = &sc->sc_control_data->ecd_rxdescs[i];
680 ds = &sc->sc_rxsoft[i];
681 m = ds->ds_mbuf;
682 error = 0;
683
684 if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
685 /*
686 * We have processed all of the
687 * receive buffers.
688 */
689 break;
690 }
691
692 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
693 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
694
695 /*
696 * Make sure the packet arrived intact.
697 */
698 if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
699 #if 1
700 if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
701 printf("%s: CRC error\n",
702 sc->sc_dev.dv_xname);
703 if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
704 printf("%s: alignment error\n",
705 sc->sc_dev.dv_xname);
706 #endif
707 ifp->if_ierrors++;
708 error = 1;
709 }
710
711 /*
712 * Add a new buffer to the receive chain. If this
713 * fails, the old buffer is recycled.
714 */
715 if (epic_add_rxbuf(sc, i) == 0) {
716 /*
 717 				 * The old buffer has been replaced; if the
 718 				 * packet had an error, free it here instead
 718 				 * of passing it up.
719 */
720 if (error) {
721 m_freem(m);
722 continue;
723 }
724
725 len = rxd->er_buflength;
726 if (len < sizeof(struct ether_header)) {
727 m_freem(m);
728 continue;
729 }
730
731 m->m_pkthdr.rcvif = ifp;
732 m->m_pkthdr.len = m->m_len = len;
733 eh = mtod(m, struct ether_header *);
734 #if NBPFILTER > 0
735 /*
736 * Pass this up to any BPF listeners.
737 */
738 if (ifp->if_bpf) {
739 bpf_mtap(ifp->if_bpf, m);
740
741 /*
742 * Only pass this up the stack
743 * if it's for us.
744 */
745 if ((ifp->if_flags & IFF_PROMISC) &&
746 bcmp(LLADDR(ifp->if_sadl),
747 eh->ether_dhost,
748 ETHER_ADDR_LEN) != 0 &&
749 (rxd->er_rxstatus &
750 (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST))
751 == 0) {
752 m_freem(m);
753 continue;
754 }
755 }
 756 #endif /* NBPFILTER > 0 */
757 m->m_data += sizeof(struct ether_header);
758 m->m_len -= sizeof(struct ether_header);
759 m->m_pkthdr.len = m->m_len;
760 ether_input(ifp, eh, m);
761 }
762 }
763
764 /*
 765 		 * Update the receive pointer.
766 */
767 sc->sc_rxptr = i;
768
769 /*
770 * Check for receive queue underflow.
771 */
772 if (intstat & INTSTAT_RQE) {
773 printf("%s: receiver queue empty\n",
774 sc->sc_dev.dv_xname);
775 /*
776 * Ring is already built; just restart the
777 * receiver.
778 */
779 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
780 sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
781 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
782 COMMAND_RXQUEUED | COMMAND_START_RX);
783 }
784 }
785
786 /*
787 * Check for transmission complete interrupts.
788 */
789 if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
790 for (i = sc->sc_txdirty;; i = EPIC_NEXTTX(i)) {
791 txd = &sc->sc_control_data->ecd_txdescs[i];
792 ds = &sc->sc_txsoft[i];
793
794 if (sc->sc_txpending == 0 ||
795 (txd->et_txstatus & ET_TXSTAT_OWNER) != 0)
796 break;
797
798 if (ds->ds_mbuf != NULL) {
799 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
800 0, ds->ds_dmamap->dm_mapsize,
801 BUS_DMASYNC_POSTWRITE);
802 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
803 m_freem(ds->ds_mbuf);
804 ds->ds_mbuf = NULL;
805 }
806 sc->sc_txpending--;
807
808 /*
809 * Check for errors and collisions.
810 */
811 if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
812 ifp->if_oerrors++;
813 ifp->if_collisions +=
814 TXSTAT_COLLISIONS(txd->et_txstatus);
815 if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST) {
816 #if 1
817 printf("%s: lost carrier\n",
818 sc->sc_dev.dv_xname);
819 #endif
 820 				/* XXX clear "active" bit in media data */
821 }
822 }
823
824 /*
825 * Update the dirty transmit buffer pointer.
826 */
827 sc->sc_txdirty = i;
828
829 /*
830 * Cancel the watchdog timer if there are no pending
831 * transmissions.
832 */
833 if (sc->sc_txpending == 0)
834 ifp->if_timer = 0;
835
836 /*
837 * Kick the transmitter after a DMA underrun.
838 */
839 if (intstat & INTSTAT_TXU) {
840 printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
841 bus_space_write_4(sc->sc_st, sc->sc_sh,
842 EPIC_COMMAND, COMMAND_TXUGO);
843 if (sc->sc_txpending)
844 bus_space_write_4(sc->sc_st, sc->sc_sh,
845 EPIC_COMMAND, COMMAND_TXQUEUED);
846 }
847
848 /*
849 * Try to get more packets going.
850 */
851 epic_start(ifp);
852 }
853
854 /*
855 * Check for fatal interrupts.
856 */
857 if (intstat & INTSTAT_FATAL_INT) {
858 printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
859 epic_init(sc);
860 }
861
862 /*
863 * Check for more interrupts.
864 */
865 goto top;
866 }
867
868 /*
869 * One second timer, used to tick the MII.
870 */
871 void
872 epic_tick(arg)
873 void *arg;
874 {
875 struct epic_softc *sc = arg;
876 int s;
877
878 s = splimp();
879 mii_tick(&sc->sc_mii);
880 splx(s);
881
882 timeout(epic_tick, sc, hz);
883 }
884
885 /*
886 * Fixup the clock source on the EPIC.
887 */
888 void
889 epic_fixup_clock_source(sc)
890 struct epic_softc *sc;
891 {
892 int i;
893
894 /*
895 * According to SMC Application Note 7-15, the EPIC's clock
896 * source is incorrect following a reset. This manifests itself
897 * as failure to recognize when host software has written to
898 * a register on the EPIC. The appnote recommends issuing at
899 * least 16 consecutive writes to the CLOCK TEST bit to correctly
900 * configure the clock source.
901 */
902 for (i = 0; i < 16; i++)
903 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
904 TEST_CLOCKTEST);
905 }
906
907 /*
908 * Perform a soft reset on the EPIC.
909 */
910 void
911 epic_reset(sc)
912 struct epic_softc *sc;
913 {
914
915 epic_fixup_clock_source(sc);
916
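	/*
	 * Take the chip out of low-power mode, then issue a soft reset,
	 * allowing it to settle after each step.  The clock source is
	 * fixed up both before and after, since it is incorrect
	 * following a reset (see epic_fixup_clock_source() above).
	 */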
917 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
918 delay(100);
919 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
920 delay(100);
921
922 epic_fixup_clock_source(sc);
923 }
924
925 /*
926 * Initialize the interface. Must be called at splnet().
927 */
928 void
929 epic_init(sc)
930 struct epic_softc *sc;
931 {
932 bus_space_tag_t st = sc->sc_st;
933 bus_space_handle_t sh = sc->sc_sh;
934 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
935 u_int8_t *enaddr = LLADDR(ifp->if_sadl);
936 struct epic_txdesc *txd;
937 struct epic_rxdesc *rxd;
938 u_int32_t genctl, reg0;
939 int i;
940
941 /*
942 * Cancel any pending I/O.
943 */
944 epic_stop(sc);
945
946 /*
947 * Reset the EPIC to a known state.
948 */
949 epic_reset(sc);
950
951 /*
952 * Magical mystery initialization.
953 */
954 bus_space_write_4(st, sh, EPIC_TXTEST, 0);
955
956 /*
957 * Initialize the EPIC genctl register:
958 *
959 * - 64 byte receive FIFO threshold
960 * - automatic advance to next receive frame
961 */
962 genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
963 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
964
965 /*
966 * Reset the MII bus and PHY.
967 */
968 reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
969 bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
970 bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
971 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
972 delay(100);
973 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
974 delay(100);
975 bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
976
977 /*
978 * Initialize Ethernet address.
979 */
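	/* Each LANn register holds two bytes, low-order byte in bits 7-0. */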
980 reg0 = enaddr[1] << 8 | enaddr[0];
981 bus_space_write_4(st, sh, EPIC_LAN0, reg0);
982 reg0 = enaddr[3] << 8 | enaddr[2];
983 bus_space_write_4(st, sh, EPIC_LAN1, reg0);
984 reg0 = enaddr[5] << 8 | enaddr[4];
985 bus_space_write_4(st, sh, EPIC_LAN2, reg0);
986
987 /*
988 * Set up the multicast hash table.
989 */
990 epic_set_mchash(sc);
991
992 /*
993 * Initialize receive control. Remember the external buffer
994 * size setting.
995 */
996 reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
997 (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
998 reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
999 if (ifp->if_flags & IFF_PROMISC)
1000 reg0 |= RXCON_PROMISCMODE;
1001 bus_space_write_4(st, sh, EPIC_RXCON, reg0);
1002
1003 /* Set the media. (XXX full-duplex in TXCON?) */
1004 mii_mediachg(&sc->sc_mii);
1005
1006 /*
1007 * Initialize the transmit descriptors.
1008 */
1009 txd = sc->sc_control_data->ecd_txdescs;
1010 bzero(txd, sizeof(sc->sc_control_data->ecd_txdescs));
1011 for (i = 0; i < EPIC_NTXDESC; i++) {
1012 txd[i].et_control = ET_TXCTL_LASTDESC | ET_TXCTL_IAF |
1013 ET_TXCTL_FRAGLIST;
1014 txd[i].et_bufaddr = sc->sc_cddma + EPIC_CDOFF(ecd_txfrags[i]);
1015 txd[i].et_nextdesc = sc->sc_cddma +
1016 EPIC_CDOFF(ecd_txdescs[(i + 1) & EPIC_NTXDESC_MASK]);
1017 }
1018
1019 /*
1020 * Initialize the receive descriptors. Note the buffers
1021 * and control word have already been initialized; we only
1022 * need to initialize the ring.
1023 */
1024 rxd = sc->sc_control_data->ecd_rxdescs;
1025 for (i = 0; i < EPIC_NRXDESC; i++) {
1026 rxd[i].er_nextdesc = sc->sc_cddma +
1027 EPIC_CDOFF(ecd_rxdescs[(i + 1) & EPIC_NRXDESC_MASK]);
1028 }
1029
1030 /*
1031 * Initialize the interrupt mask and enable interrupts.
1032 */
1033 bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
1034 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
1035
1036 /*
1037 * Give the transmit and receive rings to the EPIC.
1038 */
1039 bus_space_write_4(st, sh, EPIC_PTCDAR,
1040 sc->sc_cddma + EPIC_CDOFF(ecd_txdescs[0]));
1041 bus_space_write_4(st, sh, EPIC_PRCDAR,
1042 sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
1043
1044 /*
 1045 	 * Initialize our ring pointers.  txlast is initialized to
1046 * the end of the list so that it will wrap around to the
1047 * first descriptor when the first packet is transmitted.
1048 */
1049 sc->sc_txpending = 0;
1050 sc->sc_txdirty = 0;
1051 sc->sc_txlast = EPIC_NTXDESC - 1;
1052
1053 sc->sc_rxptr = 0;
1054
1055 /*
1056 * Set the EPIC in motion.
1057 */
1058 bus_space_write_4(st, sh, EPIC_COMMAND,
1059 COMMAND_RXQUEUED | COMMAND_START_RX);
1060
1061 /*
1062 * ...all done!
1063 */
1064 ifp->if_flags |= IFF_RUNNING;
1065 ifp->if_flags &= ~IFF_OACTIVE;
1066
1067 /*
1068 * Start the one second clock.
1069 */
1070 timeout(epic_tick, sc, hz);
1071
1072 /*
1073 * Attempt to start output on the interface.
1074 */
1075 epic_start(ifp);
1076 }
1077
1078 /*
1079 * Stop transmission on the interface.
1080 */
1081 void
1082 epic_stop(sc)
1083 struct epic_softc *sc;
1084 {
1085 bus_space_tag_t st = sc->sc_st;
1086 bus_space_handle_t sh = sc->sc_sh;
1087 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1088 struct epic_descsoft *ds;
1089 u_int32_t reg;
1090 int i;
1091
1092 /*
1093 * Stop the one second clock.
1094 */
1095 untimeout(epic_tick, sc);
1096
1097 /* Paranoia... */
1098 epic_fixup_clock_source(sc);
1099
1100 /*
1101 * Disable interrupts.
1102 */
1103 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1104 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1105 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1106
1107 /*
1108 * Stop the DMA engine and take the receiver off-line.
1109 */
1110 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1111 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1112
1113 /*
1114 * Release any queued transmit buffers.
1115 */
1116 for (i = 0; i < EPIC_NTXDESC; i++) {
1117 ds = &sc->sc_txsoft[i];
1118 if (ds->ds_mbuf != NULL) {
1119 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1120 m_freem(ds->ds_mbuf);
1121 ds->ds_mbuf = NULL;
1122 }
1123 }
1124 sc->sc_txpending = 0;
1125
1126 /*
1127 * Release the receive buffers, then reallocate/reinitialize.
1128 */
1129 for (i = 0; i < EPIC_NRXDESC; i++) {
1130 ds = &sc->sc_rxsoft[i];
1131 if (ds->ds_mbuf != NULL) {
1132 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1133 m_freem(ds->ds_mbuf);
1134 ds->ds_mbuf = NULL;
1135 }
1136 if (epic_add_rxbuf(sc, i) != 0) {
1137 /*
1138 * This "can't happen" - we're at splnet()
1139 * and we just freed the buffer we need
1140 * above.
1141 */
1142 panic("epic_stop: no buffers!");
1143 }
1144 }
1145
1146 /*
1147 * Mark the interface down and cancel the watchdog timer.
1148 */
1149 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1150 ifp->if_timer = 0;
1151 }
1152
1153 /*
1154 * Read the EPIC Serial EEPROM.
1155 */
1156 void
1157 epic_read_eeprom(sc, word, wordcnt, data)
1158 struct epic_softc *sc;
1159 int word, wordcnt;
1160 u_int16_t *data;
1161 {
1162 bus_space_tag_t st = sc->sc_st;
1163 bus_space_handle_t sh = sc->sc_sh;
1164 u_int16_t reg;
1165 int i, x;
1166
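	/*
	 * The EEPROM is accessed serially: for each word we assert
	 * chip select, clock the 3-bit READ opcode and the 6-bit word
	 * address out on EEDI (MSB first), then clock the 16 data bits
	 * in from EEDO, again MSB first.
	 */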
1167 #define EEPROM_WAIT_READY(st, sh) \
1168 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1169 /* nothing */
1170
1171 /*
1172 * Enable the EEPROM.
1173 */
1174 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1175 EEPROM_WAIT_READY(st, sh);
1176
1177 for (i = 0; i < wordcnt; i++) {
1178 /* Send CHIP SELECT for one clock tick. */
1179 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1180 EEPROM_WAIT_READY(st, sh);
1181
1182 /* Shift in the READ opcode. */
1183 for (x = 3; x > 0; x--) {
1184 reg = EECTL_ENABLE|EECTL_EECS;
1185 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1186 reg |= EECTL_EEDI;
1187 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1188 EEPROM_WAIT_READY(st, sh);
1189 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1190 EEPROM_WAIT_READY(st, sh);
1191 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1192 EEPROM_WAIT_READY(st, sh);
1193 }
1194
1195 /* Shift in address. */
1196 for (x = 6; x > 0; x--) {
1197 reg = EECTL_ENABLE|EECTL_EECS;
1198 if ((word + i) & (1 << (x - 1)))
1199 reg |= EECTL_EEDI;
1200 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1201 EEPROM_WAIT_READY(st, sh);
1202 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1203 EEPROM_WAIT_READY(st, sh);
1204 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1205 EEPROM_WAIT_READY(st, sh);
1206 }
1207
1208 /* Shift out data. */
1209 reg = EECTL_ENABLE|EECTL_EECS;
1210 data[i] = 0;
1211 for (x = 16; x > 0; x--) {
1212 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1213 EEPROM_WAIT_READY(st, sh);
1214 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1215 data[i] |= (1 << (x - 1));
1216 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1217 EEPROM_WAIT_READY(st, sh);
1218 }
1219
1220 /* Clear CHIP SELECT. */
1221 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1222 EEPROM_WAIT_READY(st, sh);
1223 }
1224
1225 /*
1226 * Disable the EEPROM.
1227 */
1228 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1229
1230 #undef EEPROM_WAIT_READY
1231 }
1232
1233 /*
1234 * Add a receive buffer to the indicated descriptor.
1235 */
1236 int
1237 epic_add_rxbuf(sc, idx)
1238 struct epic_softc *sc;
1239 int idx;
1240 {
1241 struct epic_rxdesc *rxd = &sc->sc_control_data->ecd_rxdescs[idx];
1242 struct epic_descsoft *ds = &sc->sc_rxsoft[idx];
1243 struct mbuf *m, *oldm;
1244 int error = 0;
1245
1246 oldm = ds->ds_mbuf;
1247
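	/*
	 * Try to get a new mbuf cluster.  If allocation fails and an
	 * old buffer is present, recycle it so the descriptor never
	 * goes without a buffer, and return ENOMEM to tell the caller
	 * that no fresh buffer could be provided.
	 */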
1248 MGETHDR(m, M_DONTWAIT, MT_DATA);
1249 if (m != NULL) {
1250 MCLGET(m, M_DONTWAIT);
1251 if ((m->m_flags & M_EXT) == 0) {
1252 error = ENOMEM;
1253 m_freem(m);
1254 if (oldm == NULL)
1255 return (error);
1256 m = oldm;
1257 m->m_data = m->m_ext.ext_buf;
1258 }
1259 } else {
1260 error = ENOMEM;
1261 if (oldm == NULL)
1262 return (error);
1263 m = oldm;
1264 m->m_data = m->m_ext.ext_buf;
1265 }
1266
1267 ds->ds_mbuf = m;
1268
1269 /*
1270 * Set up the DMA map for this receive buffer.
1271 */
1272 if (m != oldm) {
1273 if (oldm != NULL)
1274 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1275 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1276 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1277 if (error) {
1278 printf("%s: can't load rx buffer, error = %d\n",
1279 sc->sc_dev.dv_xname, error);
1280 panic("epic_add_rxbuf"); /* XXX */
1281 }
1282 }
1283
1284 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1285 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1286
1287 /*
1288 * Move the data pointer up so that the incoming packet
1289 * will be 32-bit aligned.
1290 */
1291 m->m_data += RX_ALIGNMENT_FUDGE;
1292
1293 /*
1294 * Initialize the receive descriptor.
1295 */
1296 rxd->er_bufaddr = ds->ds_dmamap->dm_segs[0].ds_addr +
1297 RX_ALIGNMENT_FUDGE;
1298 rxd->er_buflength = m->m_ext.ext_size - RX_ALIGNMENT_FUDGE;
1299 rxd->er_control = 0;
1300 rxd->er_rxstatus = ER_RXSTAT_OWNER;
1301
1302 return (error);
1303 }
1304
1305 /*
1306 * Set the EPIC multicast hash table.
1307 */
1308 void
1309 epic_set_mchash(sc)
1310 struct epic_softc *sc;
1311 {
1312 struct ethercom *ec = &sc->sc_ethercom;
1313 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1314 struct ether_multi *enm;
1315 struct ether_multistep step;
1316 u_int8_t *cp;
1317 u_int32_t crc, mchash[4];
1318 int len;
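	/* Nibble-at-a-time CRC-32 table (Ethernet FCS polynomial). */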
1319 static const u_int32_t crctab[] = {
1320 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1321 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1322 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1323 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1324 };
1325
1326 /*
1327 * Set up the multicast address filter by passing all multicast
1328 * addresses through a CRC generator, and then using the high-order
1329 * 6 bits as an index into the 64 bit multicast hash table (only
1330 * the lower 16 bits of each 32 bit multicast hash register are
1331 * valid). The high order bit selects the register, while the
1332 * rest of the bits select the bit within the register.
1333 */
1334
1335 if (ifp->if_flags & IFF_PROMISC)
1336 goto allmulti;
1337
1338 #if 1 /* XXX thorpej - hardware bug in 10Mb mode */
1339 goto allmulti;
1340 #endif
1341
1342 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1343
1344 ETHER_FIRST_MULTI(step, ec, enm);
1345 while (enm != NULL) {
1346 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1347 /*
1348 * We must listen to a range of multicast addresses.
1349 * For now, just accept all multicasts, rather than
1350 * trying to set only those filter bits needed to match
1351 * the range. (At this time, the only use of address
1352 * ranges is for IP multicast routing, for which the
1353 * range is big enough to require all bits set.)
1354 */
1355 goto allmulti;
1356 }
1357
1358 cp = enm->enm_addrlo;
1359 crc = 0xffffffff;
1360 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1361 crc ^= *cp++;
1362 crc = (crc >> 4) ^ crctab[crc & 0xf];
1363 crc = (crc >> 4) ^ crctab[crc & 0xf];
1364 }
1365 /* Just want the 6 most significant bits. */
1366 crc >>= 26;
1367
1368 /* Set the corresponding bit in the hash table. */
1369 mchash[crc >> 4] |= 1 << (crc & 0xf);
1370
1371 ETHER_NEXT_MULTI(step, enm);
1372 }
1373
1374 ifp->if_flags &= ~IFF_ALLMULTI;
1375 goto sethash;
1376
1377 allmulti:
1378 ifp->if_flags |= IFF_ALLMULTI;
1379 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1380
1381 sethash:
1382 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1383 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1384 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1385 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1386 }
1387
1388 /*
1389 * Wait for the MII to become ready.
1390 */
1391 int
1392 epic_mii_wait(sc, rw)
1393 struct epic_softc *sc;
1394 u_int32_t rw;
1395 {
1396 int i;
1397
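	/* Poll for the command bit to clear; give up after ~100 usec. */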
1398 for (i = 0; i < 50; i++) {
1399 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1400 == 0)
1401 break;
1402 delay(2);
1403 }
1404 if (i == 50) {
1405 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1406 return (1);
1407 }
1408
1409 return (0);
1410 }
1411
1412 /*
1413 * Read from the MII.
1414 */
1415 int
1416 epic_mii_read(self, phy, reg)
1417 struct device *self;
1418 int phy, reg;
1419 {
1420 struct epic_softc *sc = (struct epic_softc *)self;
1421
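	/*
	 * Wait for the management interface to go idle, issue the read
	 * command for (phy, reg), wait for it to complete, and return
	 * the result from MMDATA.  A timeout returns 0.
	 */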
1422 if (epic_mii_wait(sc, MMCTL_WRITE))
1423 return (0);
1424
1425 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1426 MMCTL_ARG(phy, reg, MMCTL_READ));
1427
1428 if (epic_mii_wait(sc, MMCTL_READ))
1429 return (0);
1430
1431 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1432 MMDATA_MASK);
1433 }
1434
1435 /*
1436 * Write to the MII.
1437 */
1438 void
1439 epic_mii_write(self, phy, reg, val)
1440 struct device *self;
1441 int phy, reg, val;
1442 {
1443 struct epic_softc *sc = (struct epic_softc *)self;
1444
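	/*
	 * Wait for any previous operation to finish, then load MMDATA
	 * with the value and issue the write command; we do not wait
	 * for the write itself to complete.
	 */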
1445 if (epic_mii_wait(sc, MMCTL_WRITE))
1446 return;
1447
1448 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1449 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1450 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1451 }
1452
1453 /*
1454 * Callback from PHY when media changes.
1455 */
1456 void
1457 epic_statchg(self)
1458 struct device *self;
1459 {
1460
1461 /* XXX Update ifp->if_baudrate */
1462 }
1463
1464 /*
1465 * Callback from ifmedia to request current media status.
1466 */
1467 void
1468 epic_mediastatus(ifp, ifmr)
1469 struct ifnet *ifp;
1470 struct ifmediareq *ifmr;
1471 {
1472 struct epic_softc *sc = ifp->if_softc;
1473
1474 mii_pollstat(&sc->sc_mii);
1475 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1476 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1477 }
1478
1479 /*
1480 * Callback from ifmedia to request new media setting.
1481 */
1482 int
1483 epic_mediachange(ifp)
1484 struct ifnet *ifp;
1485 {
1486
1487 if (ifp->if_flags & IFF_UP)
1488 epic_init((struct epic_softc *)ifp->if_softc);
1489 return (0);
1490 }
1491