smc83c170.c revision 1.10 1 /* $NetBSD: smc83c170.c,v 1.10 1999/02/12 05:55:27 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/mii/miivar.h>
82
83 #include <dev/ic/smc83c170reg.h>
84 #include <dev/ic/smc83c170var.h>
85
/*
 * Forward declarations.  __P() provides prototypes on ANSI compilers
 * while remaining acceptable to K&R ones.
 */

/* ifnet interface functions */
void	epic_start __P((struct ifnet *));
void	epic_watchdog __P((struct ifnet *));
int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));

/* shutdown hook, run at reboot */
void	epic_shutdown __P((void *));

/* chip control and housekeeping */
void	epic_reset __P((struct epic_softc *));
void	epic_init __P((struct epic_softc *));
void	epic_stop __P((struct epic_softc *));
int	epic_add_rxbuf __P((struct epic_softc *, int));
void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));
void	epic_fixup_clock_source __P((struct epic_softc *));
int	epic_mii_read __P((struct device *, int, int));
void	epic_mii_write __P((struct device *, int, int, int));
int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
void	epic_tick __P((void *));

/* MII/media callbacks */
void	epic_statchg __P((struct device *));
int	epic_mediachange __P((struct ifnet *));
void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* XXX Should be somewhere else. */
/* Minimum Ethernet frame length, not counting the CRC. */
#define	ETHER_MIN_LEN	60

/*
 * Interrupt sources we handle in epic_intr(): fatal errors, transmit
 * underrun/complete, and receive queue-empty/copy-complete.
 */
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
113
114 /*
115 * Attach an EPIC interface to the system.
116 */
117 void
118 epic_attach(sc)
119 struct epic_softc *sc;
120 {
121 bus_space_tag_t st = sc->sc_st;
122 bus_space_handle_t sh = sc->sc_sh;
123 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
124 int i, rseg, error, attach_stage;
125 bus_dma_segment_t seg;
126 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
127 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
128
129 attach_stage = 0;
130
131 /*
132 * Allocate the control data structures, and create and load the
133 * DMA map for it.
134 */
135 if ((error = bus_dmamem_alloc(sc->sc_dmat,
136 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
137 BUS_DMA_NOWAIT)) != 0) {
138 printf("%s: unable to allocate control data, error = %d\n",
139 sc->sc_dev.dv_xname, error);
140 goto fail;
141 }
142
143 attach_stage = 1;
144
145 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
146 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
147 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
148 printf("%s: unable to map control data, error = %d\n",
149 sc->sc_dev.dv_xname, error);
150 goto fail;
151 }
152
153 attach_stage = 2;
154
155 if ((error = bus_dmamap_create(sc->sc_dmat,
156 sizeof(struct epic_control_data), 1,
157 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
158 &sc->sc_cddmamap)) != 0) {
159 printf("%s: unable to create control data DMA map, "
160 "error = %d\n", sc->sc_dev.dv_xname, error);
161 goto fail;
162 }
163
164 attach_stage = 3;
165
166 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
167 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
168 BUS_DMA_NOWAIT)) != 0) {
169 printf("%s: unable to load control data DMA map, error = %d\n",
170 sc->sc_dev.dv_xname, error);
171 goto fail;
172 }
173
174 attach_stage = 4;
175
176 /*
177 * Create the transmit buffer DMA maps.
178 */
179 for (i = 0; i < EPIC_NTXDESC; i++) {
180 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
181 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
182 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
183 printf("%s: unable to create tx DMA map %d, "
184 "error = %d\n", sc->sc_dev.dv_xname, i, error);
185 goto fail;
186 }
187 }
188
189 attach_stage = 5;
190
191 /*
192 * Create the recieve buffer DMA maps.
193 */
194 for (i = 0; i < EPIC_NRXDESC; i++) {
195 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
196 MCLBYTES, 0, BUS_DMA_NOWAIT,
197 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
198 printf("%s: unable to create rx DMA map %d, "
199 "error = %d\n", sc->sc_dev.dv_xname, i, error);
200 goto fail;
201 }
202 }
203
204 attach_stage = 6;
205
206 /*
207 * Pre-allocate the receive buffers.
208 */
209 for (i = 0; i < EPIC_NRXDESC; i++) {
210 if ((error = epic_add_rxbuf(sc, i)) != 0) {
211 printf("%s: unable to allocate or map rx buffer %d\n,"
212 " error = %d\n", sc->sc_dev.dv_xname, i, error);
213 goto fail;
214 }
215 }
216
217 attach_stage = 7;
218
219 /*
220 * Bring the chip out of low-power mode and reset it to a known state.
221 */
222 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
223 epic_reset(sc);
224
225 /*
226 * Read the Ethernet address from the EEPROM.
227 */
228 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
229 bcopy(myea, enaddr, sizeof(myea));
230
231 /*
232 * ...and the device name.
233 */
234 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
235 mydevname);
236 bcopy(mydevname, devname, sizeof(mydevname));
237 devname[sizeof(mydevname)] = '\0';
238 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
239 if (devname[i] == ' ')
240 devname[i] = '\0';
241 else
242 break;
243 }
244
245 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
246 devname, ether_sprintf(enaddr));
247
248 /*
249 * Initialize our media structures and probe the MII.
250 */
251 sc->sc_mii.mii_ifp = ifp;
252 sc->sc_mii.mii_readreg = epic_mii_read;
253 sc->sc_mii.mii_writereg = epic_mii_write;
254 sc->sc_mii.mii_statchg = epic_statchg;
255 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
256 epic_mediastatus);
257 mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
258 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
259 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
260 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
261 } else
262 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
263
264 ifp = &sc->sc_ethercom.ec_if;
265 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
266 ifp->if_softc = sc;
267 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
268 ifp->if_ioctl = epic_ioctl;
269 ifp->if_start = epic_start;
270 ifp->if_watchdog = epic_watchdog;
271
272 /*
273 * Attach the interface.
274 */
275 if_attach(ifp);
276 ether_ifattach(ifp, enaddr);
277 #if NBPFILTER > 0
278 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
279 sizeof(struct ether_header));
280 #endif
281
282 /*
283 * Make sure the interface is shutdown during reboot.
284 */
285 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
286 if (sc->sc_sdhook == NULL)
287 printf("%s: WARNING: unable to establish shutdown hook\n",
288 sc->sc_dev.dv_xname);
289 return;
290
291 fail:
292 /*
293 * Free any resources we've allocated during the failed attach
294 * attempt. Do this in reverse order and fall through.
295 */
296 switch (attach_stage) {
297 case 7:
298 for (i = 0; i < EPIC_NRXDESC; i++) {
299 if (EPIC_DSRX(sc, i)->ds_mbuf != NULL) {
300 bus_dmamap_unload(sc->sc_dmat,
301 EPIC_DSRX(sc, i)->ds_dmamap);
302 m_freem(EPIC_DSRX(sc, i)->ds_mbuf);
303 }
304 }
305 /* FALLTHROUGH */
306
307 case 6:
308 for (i = 0; i < EPIC_NRXDESC; i++)
309 bus_dmamap_destroy(sc->sc_dmat,
310 EPIC_DSRX(sc, i)->ds_dmamap);
311 /* FALLTHROUGH */
312
313 case 5:
314 for (i = 0; i < EPIC_NTXDESC; i++)
315 bus_dmamap_destroy(sc->sc_dmat,
316 EPIC_DSTX(sc, i)->ds_dmamap);
317 /* FALLTHROUGH */
318
319 case 4:
320 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
321 /* FALLTHROUGH */
322
323 case 3:
324 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
325 /* FALLTHROUGH */
326
327 case 2:
328 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
329 sizeof(struct epic_control_data));
330 /* FALLTHROUGH */
331
332 case 1:
333 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
334 break;
335 }
336 }
337
338 /*
339 * Shutdown hook. Make sure the interface is stopped at reboot.
340 */
/*
 * Shutdown hook.  Make sure the interface is stopped at reboot so
 * the chip is not DMAing into reclaimed memory.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg);
}
349
350 /*
351 * Start packet transmission on the interface.
352 * [ifnet interface function]
353 */
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Drains the interface send queue into the transmit descriptor ring,
 * building a fraglist per packet.  Packets that don't map directly
 * (too many fragments or transient resource shortage) are copied into
 * a fresh contiguous mbuf and retried.  The first descriptor of the
 * batch is handed to the chip only after the whole chain is built,
 * to avoid racing the transmitter.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  On any failure in the copy path the packet is
		 * put back at the head of the send queue and we stop.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			/* Attach a cluster if the packet won't fit in the
			 * mbuf header. */
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
524
525 /*
526 * Watchdog timer handler.
527 * [ifnet interface function]
528 */
529 void
530 epic_watchdog(ifp)
531 struct ifnet *ifp;
532 {
533 struct epic_softc *sc = ifp->if_softc;
534
535 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
536 ifp->if_oerrors++;
537
538 epic_init(sc);
539 }
540
541 /*
542 * Handle control requests from the operator.
543 * [ifnet interface function]
544 */
/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 *
 * Runs at splnet().  Returns 0 on success or an errno value.
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS can either adopt the interface's link-level
			 * address or program a new one into it.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		/* We don't support jumbo frames; cap at the Ethernet MTU. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			epic_init(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
651
652 /*
653 * Interrupt handler.
654 */
/*
 * Interrupt handler.
 *
 * Loops until the chip stops asserting INTSTAT_INT_ACTV, servicing
 * receive completions, transmit completions/underruns, and fatal
 * errors.  Returns non-zero if the interrupt was ours.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
		/* Walk the ring from the last processed descriptor. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * Add a new buffer to the receive chain.  If this
			 * fails, the old buffer is recycled.
			 */
			m = ds->ds_mbuf;
			if (epic_add_rxbuf(sc, i) != 0) {
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * NOTE(review): this takes the frame length from
			 * er_buflength; confirm against the EPIC/100
			 * descriptor layout that the received-frame length
			 * isn't carried in the status word instead, and
			 * whether it includes the FCS.
			 */
			len = rxd->er_buflength;
			if (len < sizeof(struct ether_header)) {
				/* Runt; drop it. */
				m_freem(m);
				continue;
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				/*
				 * In promiscuous mode, drop unicast frames
				 * not addressed to us after BPF has seen
				 * them.
				 */
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
					 ETHER_ADDR_LEN) != 0 &&
				    (rxd->er_rxstatus &
				     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST)) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Remove the Ethernet header and pass it on. */
			m_adj(m, sizeof(struct ether_header));
			ether_input(ifp, eh, m);
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim descriptors the chip has finished with. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
		epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
872
873 /*
874 * One second timer, used to tick the MII.
875 */
876 void
877 epic_tick(arg)
878 void *arg;
879 {
880 struct epic_softc *sc = arg;
881 int s;
882
883 s = splimp();
884 mii_tick(&sc->sc_mii);
885 splx(s);
886
887 timeout(epic_tick, sc, hz);
888 }
889
890 /*
891 * Fixup the clock source on the EPIC.
892 */
893 void
894 epic_fixup_clock_source(sc)
895 struct epic_softc *sc;
896 {
897 int i;
898
899 /*
900 * According to SMC Application Note 7-15, the EPIC's clock
901 * source is incorrect following a reset. This manifests itself
902 * as failure to recognize when host software has written to
903 * a register on the EPIC. The appnote recommends issuing at
904 * least 16 consecutive writes to the CLOCK TEST bit to correctly
905 * configure the clock source.
906 */
907 for (i = 0; i < 16; i++)
908 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
909 TEST_CLOCKTEST);
910 }
911
912 /*
913 * Perform a soft reset on the EPIC.
914 */
915 void
916 epic_reset(sc)
917 struct epic_softc *sc;
918 {
919
920 epic_fixup_clock_source(sc);
921
922 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
923 delay(100);
924 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
925 delay(100);
926
927 epic_fixup_clock_source(sc);
928 }
929
930 /*
931 * Initialize the interface. Must be called at splnet().
932 */
/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Stops and resets the chip, programs the station address and
 * multicast filter, sets the media, rebuilds the transmit and
 * receive descriptor rings, enables interrupts, and starts the
 * receiver.  Also (re)starts the one-second MII tick.
 */
void
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	u_int32_t genctl, reg0;
	int i;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  GPIO1 is temporarily driven as
	 * an output during the PHY reset pulse, then NVCTL is restored.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers each take
	 * one 16-bit (little-endian) chunk of the address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Set up the multicast hash table.
	 */
	epic_set_mchash(sc);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the media.  (XXX full-duplex in TXCON?) */
	mii_mediachg(&sc->sc_mii);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.  The buffers are
	 * already allocated.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++)
		EPIC_INIT_RXDESC(sc, i);
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout(epic_tick, sc, hz);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);
}
1070
1071 /*
1072 * Stop transmission on the interface.
1073 */
/*
 * Stop transmission on the interface.
 *
 * Cancels the MII tick, disables interrupts, halts the DMA engines
 * and receiver, and frees any mbufs still queued for transmit.
 * Receive buffers are left in place for the next epic_init().
 */
void
epic_stop(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	untimeout(epic_tick, sc);

	/* Paranoia...  make sure register writes are seen. */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1124
1125 /*
1126 * Read the EPIC Serial EEPROM.
1127 */
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the serial EEPROM protocol through the EECTL register:
 * for each of `wordcnt' words starting at word offset `word', it
 * asserts chip select, clocks in the 3-bit READ opcode and a 6-bit
 * address (MSB first), then clocks out 16 data bits into data[i].
 * Every register write is followed by a busy-wait for EECTL_EERDY.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first, one bit per
		 * DI-setup / clock-high / clock-low cycle. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out the 16 data bits, MSB first, sampling DO
		 * after each clock pulse. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1204
1205 /*
1206 * Add a receive buffer to the indicated descriptor.
1207 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header for the new receive buffer. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; a bare mbuf is too small for a frame. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * If this descriptor already had a buffer, unload its DMA map
	 * before reusing the map for the new cluster.  Note the old
	 * mbuf is not freed here; the caller owns it (e.g. it was just
	 * handed up the stack).
	 */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the cluster's data area for device reads. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	/* Make sure the buffer is visible to the device before use. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (back) to the chip. */
	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
1247
1248 /*
1249 * Set the EPIC multicast hash table.
1250 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[4];
	int len;
	/* Nibble-at-a-time lookup table for the Ethernet CRC-32. */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bit selects the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

#if 1 /* XXX thorpej - hardware bug in 10Mb mode */
	/*
	 * NOTE: this unconditional jump makes the per-address hash
	 * computation below dead code until the workaround is removed.
	 */
	goto allmulti;
#endif

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	/* Walk the list of multicast groups joined on this interface. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		/* CRC the address, 4 bits per table lookup. */
		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	/* Accept all multicast: set every valid (low 16) filter bit. */
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	/* Program the four 16-bit hash registers. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
1330
1331 /*
1332 * Wait for the MII to become ready.
1333 */
1334 int
1335 epic_mii_wait(sc, rw)
1336 struct epic_softc *sc;
1337 u_int32_t rw;
1338 {
1339 int i;
1340
1341 for (i = 0; i < 50; i++) {
1342 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1343 == 0)
1344 break;
1345 delay(2);
1346 }
1347 if (i == 50) {
1348 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1349 return (1);
1350 }
1351
1352 return (0);
1353 }
1354
1355 /*
1356 * Read from the MII.
1357 */
int
epic_mii_read(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct epic_softc *sc = (struct epic_softc *)self;

	/* Wait for any in-progress write cycle to complete. */
	if (epic_mii_wait(sc, MMCTL_WRITE))
		return (0);

	/* Start the read of the given PHY register. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_READ));

	/* Wait for the read cycle to finish. */
	if (epic_mii_wait(sc, MMCTL_READ))
		return (0);

	/*
	 * Return the masked data.  Note that a timeout above also
	 * returns 0, which callers cannot distinguish from a register
	 * that legitimately reads as zero.
	 */
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
	    MMDATA_MASK);
}
1377
1378 /*
1379 * Write to the MII.
1380 */
void
epic_mii_write(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct epic_softc *sc = (struct epic_softc *)self;

	/* Wait for any in-progress write cycle; drop the write on timeout. */
	if (epic_mii_wait(sc, MMCTL_WRITE))
		return;

	/* Load the data register first, then kick off the write cycle. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
	    MMCTL_ARG(phy, reg, MMCTL_WRITE));
}
1395
1396 /*
1397 * Callback from PHY when media changes.
1398 */
void
epic_statchg(self)
	struct device *self;
{

	/*
	 * Deliberately empty: nothing in the softc needs updating when
	 * the PHY reports a media change (yet).
	 */
	/* XXX Update ifp->if_baudrate */
}
1406
1407 /*
1408 * Callback from ifmedia to request current media status.
1409 */
1410 void
1411 epic_mediastatus(ifp, ifmr)
1412 struct ifnet *ifp;
1413 struct ifmediareq *ifmr;
1414 {
1415 struct epic_softc *sc = ifp->if_softc;
1416
1417 mii_pollstat(&sc->sc_mii);
1418 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1419 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1420 }
1421
1422 /*
1423 * Callback from ifmedia to request new media setting.
1424 */
1425 int
1426 epic_mediachange(ifp)
1427 struct ifnet *ifp;
1428 {
1429
1430 if (ifp->if_flags & IFF_UP)
1431 epic_init((struct epic_softc *)ifp->if_softc);
1432 return (0);
1433 }
1434