smc83c170.c revision 1.21 1 /* $NetBSD: smc83c170.c,v 1.21 1999/08/27 19:13:00 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/mii/miivar.h>
82
83 #include <dev/ic/smc83c170reg.h>
84 #include <dev/ic/smc83c170var.h>
85
86 void epic_start __P((struct ifnet *));
87 void epic_watchdog __P((struct ifnet *));
88 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
89
90 void epic_shutdown __P((void *));
91
92 void epic_reset __P((struct epic_softc *));
93 int epic_init __P((struct epic_softc *));
94 void epic_rxdrain __P((struct epic_softc *));
95 void epic_stop __P((struct epic_softc *, int));
96 int epic_add_rxbuf __P((struct epic_softc *, int));
97 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
98 void epic_set_mchash __P((struct epic_softc *));
99 void epic_fixup_clock_source __P((struct epic_softc *));
100 int epic_mii_read __P((struct device *, int, int));
101 void epic_mii_write __P((struct device *, int, int, int));
102 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
103 void epic_tick __P((void *));
104
105 void epic_statchg __P((struct device *));
106 int epic_mediachange __P((struct ifnet *));
107 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
108
109 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
110 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
111
112 int epic_copy_small = 0;
113
114 /*
115 * Attach an EPIC interface to the system.
116 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	/* devname holds the 12-byte EEPROM device-name string plus a NUL. */
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  On failure, unwind in reverse order via the
	 * fail_* labels at the bottom of this function.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (3 16-bit words
	 * starting at word 0).
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	bcopy(myea, enaddr, sizeof(myea));

	/*
	 * ...and the device name (6 words at EEPROM offset 0x2c);
	 * NUL-terminate it and strip trailing spaces.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	bcopy(mydevname, devname, sizeof(mydevname));
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a manual "none" media setting.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_phy_probe(&sc->sc_dev, &sc->sc_mii, 0xffffffff);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 *
	 * NOTE(review): the != NULL tests below assume ds_dmamap of
	 * descriptors past the failing index is NULL; the control data
	 * isn't explicitly zeroed above — verify bus_dmamem_alloc()
	 * returns zeroed memory on all platforms.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
293
294 /*
295 * Shutdown hook. Make sure the interface is stopped at reboot.
296 */
/*
 * Shutdown hook: fully stop the interface (including draining the
 * receive ring) so the chip is quiescent across a reboot.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg, 1);
}
305
306 /*
307 * Start packet transmission on the interface.
308 * [ifnet interface function]
309 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use, so we can tell below whether we enqueued
	 * anything and hand the first descriptor to the chip last.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy the
		 * packet into a single contiguous mbuf and try again.
		 * On any failure in this path the packet is put back at
		 * the head of the send queue and we bail out.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the loaded DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later
		 * when transmit completion is processed.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves by forcing
		 * the length up to the minimum Ethernet frame size
		 * (less CRC, which the chip appends).
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
481
482 /*
483 * Watchdog timer handler.
484 * [ifnet interface function]
485 */
486 void
487 epic_watchdog(ifp)
488 struct ifnet *ifp;
489 {
490 struct epic_softc *sc = ifp->if_softc;
491
492 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
493 ifp->if_oerrors++;
494
495 (void) epic_init(sc);
496 }
497
498 /*
499 * Handle control requests from the operator.
500 * [ifnet interface function]
501 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = epic_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			 if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			 else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			 /* Set new address. */
			 error = epic_init(sc);
			 break;
		    }
#endif /* NS */
		default:
			error = epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
611
612 /*
613 * Interrupt handler.
614 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC; loop back to here
	 * until the chip reports no more pending interrupts.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we
	 * service).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * Chip still owns this descriptor: we have
				 * processed all of the receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = rxd->er_rxlength - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now and recycle
				 * the buffer.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us (i.e. in
			 * promiscuous mode, drop unicast frames addressed
			 * to other stations after BPF has seen them).
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
					 ETHER_ADDR_LEN) != 0 &&
				    (rxd->er_rxstatus &
				     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST)) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.  Walk the dirty
	 * descriptors until we find one the chip still owns.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.  Any of these warrants a full
	 * reinitialization of the chip.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
879
880 /*
881 * One second timer, used to tick the MII.
882 */
883 void
884 epic_tick(arg)
885 void *arg;
886 {
887 struct epic_softc *sc = arg;
888 int s;
889
890 s = splnet();
891 mii_tick(&sc->sc_mii);
892 splx(s);
893
894 timeout(epic_tick, sc, hz);
895 }
896
897 /*
898 * Fixup the clock source on the EPIC.
899 */
900 void
901 epic_fixup_clock_source(sc)
902 struct epic_softc *sc;
903 {
904 int i;
905
906 /*
907 * According to SMC Application Note 7-15, the EPIC's clock
908 * source is incorrect following a reset. This manifests itself
909 * as failure to recognize when host software has written to
910 * a register on the EPIC. The appnote recommends issuing at
911 * least 16 consecutive writes to the CLOCK TEST bit to correctly
912 * configure the clock source.
913 */
914 for (i = 0; i < 16; i++)
915 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
916 TEST_CLOCKTEST);
917 }
918
919 /*
920 * Perform a soft reset on the EPIC.
921 */
922 void
923 epic_reset(sc)
924 struct epic_softc *sc;
925 {
926
927 epic_fixup_clock_source(sc);
928
929 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
930 delay(100);
931 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
932 delay(100);
933
934 epic_fixup_clock_source(sc);
935 }
936
937 /*
938 * Initialize the interface. Must be called at splnet().
939 */
int
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O (keep the receive buffers; we will
	 * reuse them below).
	 */
	epic_stop(sc, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (two bytes per LAN register,
	 * low byte first).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any slots that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout(epic_tick, sc, hz);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1097
1098 /*
1099 * Drain the receive queue.
1100 */
1101 void
1102 epic_rxdrain(sc)
1103 struct epic_softc *sc;
1104 {
1105 struct epic_descsoft *ds;
1106 int i;
1107
1108 for (i = 0; i < EPIC_NRXDESC; i++) {
1109 ds = EPIC_DSRX(sc, i);
1110 if (ds->ds_mbuf != NULL) {
1111 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1112 m_freem(ds->ds_mbuf);
1113 ds->ds_mbuf = NULL;
1114 }
1115 }
1116 }
1117
1118 /*
1119 * Stop transmission on the interface.
1120 */
void
epic_stop(sc, drain)
	struct epic_softc *sc;
	int drain;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	untimeout(epic_tick, sc);

	/* Paranoia: reapply the post-reset clock-source workaround. */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.  (Skipped when called
		 * from epic_init(), which reuses them.)
		 */
		epic_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1179
/*
 * Read the EPIC Serial EEPROM.
 *
 * Reads `wordcnt' consecutive 16-bit words, starting at word offset
 * `word', into `data' by bit-banging the EEPROM's serial protocol
 * through the EECTL register.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

/* Spin until the EEPROM interface reports ready. */
#define EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/*
		 * Shift in the 3-bit READ opcode, MSB first.  Each bit
		 * is presented on EEDI, then latched by pulsing EESK
		 * high and back low.
		 */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/*
		 * Shift out the 16 data bits, MSB first: clock EESK,
		 * then sample EEDO after each pulse.
		 */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT between words. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1259
1260 /*
1261 * Add a receive buffer to the indicated descriptor.
1262 */
1263 int
1264 epic_add_rxbuf(sc, idx)
1265 struct epic_softc *sc;
1266 int idx;
1267 {
1268 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1269 struct mbuf *m;
1270 int error;
1271
1272 MGETHDR(m, M_DONTWAIT, MT_DATA);
1273 if (m == NULL)
1274 return (ENOBUFS);
1275
1276 MCLGET(m, M_DONTWAIT);
1277 if ((m->m_flags & M_EXT) == 0) {
1278 m_freem(m);
1279 return (ENOBUFS);
1280 }
1281
1282 if (ds->ds_mbuf != NULL)
1283 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1284
1285 ds->ds_mbuf = m;
1286
1287 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1288 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1289 if (error) {
1290 printf("%s: can't load rx DMA map %d, error = %d\n",
1291 sc->sc_dev.dv_xname, idx, error);
1292 panic("epic_add_rxbuf"); /* XXX */
1293 }
1294
1295 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1296 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1297
1298 EPIC_INIT_RXDESC(sc, idx);
1299
1300 return (0);
1301 }
1302
1303 /*
1304 * Set the EPIC multicast hash table.
1305 *
1306 * NOTE: We rely on a recently-updated mii_media_active here!
1307 */
1308 void
1309 epic_set_mchash(sc)
1310 struct epic_softc *sc;
1311 {
1312 struct ethercom *ec = &sc->sc_ethercom;
1313 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1314 struct ether_multi *enm;
1315 struct ether_multistep step;
1316 u_int8_t *cp;
1317 u_int32_t crc, mchash[4];
1318 int len;
1319 static const u_int32_t crctab[] = {
1320 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1321 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1322 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1323 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1324 };
1325
1326 /*
1327 * Set up the multicast address filter by passing all multicast
1328 * addresses through a CRC generator, and then using the high-order
1329 * 6 bits as an index into the 64 bit multicast hash table (only
1330 * the lower 16 bits of each 32 bit multicast hash register are
1331 * valid). The high order bit selects the register, while the
1332 * rest of the bits select the bit within the register.
1333 */
1334
1335 if (ifp->if_flags & IFF_PROMISC)
1336 goto allmulti;
1337
1338 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1339 /* XXX hardware bug in 10Mbps mode. */
1340 goto allmulti;
1341 }
1342
1343 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1344
1345 ETHER_FIRST_MULTI(step, ec, enm);
1346 while (enm != NULL) {
1347 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1348 /*
1349 * We must listen to a range of multicast addresses.
1350 * For now, just accept all multicasts, rather than
1351 * trying to set only those filter bits needed to match
1352 * the range. (At this time, the only use of address
1353 * ranges is for IP multicast routing, for which the
1354 * range is big enough to require all bits set.)
1355 */
1356 goto allmulti;
1357 }
1358
1359 cp = enm->enm_addrlo;
1360 crc = 0xffffffff;
1361 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1362 crc ^= *cp++;
1363 crc = (crc >> 4) ^ crctab[crc & 0xf];
1364 crc = (crc >> 4) ^ crctab[crc & 0xf];
1365 }
1366 /* Just want the 6 most significant bits. */
1367 crc >>= 26;
1368
1369 /* Set the corresponding bit in the hash table. */
1370 mchash[crc >> 4] |= 1 << (crc & 0xf);
1371
1372 ETHER_NEXT_MULTI(step, enm);
1373 }
1374
1375 ifp->if_flags &= ~IFF_ALLMULTI;
1376 goto sethash;
1377
1378 allmulti:
1379 ifp->if_flags |= IFF_ALLMULTI;
1380 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1381
1382 sethash:
1383 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1384 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1385 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1386 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1387 }
1388
1389 /*
1390 * Wait for the MII to become ready.
1391 */
1392 int
1393 epic_mii_wait(sc, rw)
1394 struct epic_softc *sc;
1395 u_int32_t rw;
1396 {
1397 int i;
1398
1399 for (i = 0; i < 50; i++) {
1400 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1401 == 0)
1402 break;
1403 delay(2);
1404 }
1405 if (i == 50) {
1406 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1407 return (1);
1408 }
1409
1410 return (0);
1411 }
1412
1413 /*
1414 * Read from the MII.
1415 */
1416 int
1417 epic_mii_read(self, phy, reg)
1418 struct device *self;
1419 int phy, reg;
1420 {
1421 struct epic_softc *sc = (struct epic_softc *)self;
1422
1423 if (epic_mii_wait(sc, MMCTL_WRITE))
1424 return (0);
1425
1426 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1427 MMCTL_ARG(phy, reg, MMCTL_READ));
1428
1429 if (epic_mii_wait(sc, MMCTL_READ))
1430 return (0);
1431
1432 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1433 MMDATA_MASK);
1434 }
1435
1436 /*
1437 * Write to the MII.
1438 */
1439 void
1440 epic_mii_write(self, phy, reg, val)
1441 struct device *self;
1442 int phy, reg, val;
1443 {
1444 struct epic_softc *sc = (struct epic_softc *)self;
1445
1446 if (epic_mii_wait(sc, MMCTL_WRITE))
1447 return;
1448
1449 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1450 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1451 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1452 }
1453
1454 /*
1455 * Callback from PHY when media changes.
1456 */
1457 void
1458 epic_statchg(self)
1459 struct device *self;
1460 {
1461 struct epic_softc *sc = (struct epic_softc *)self;
1462 u_int32_t txcon;
1463
1464 /*
1465 * Update loopback bits in TXCON to reflect duplex mode.
1466 */
1467 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1468 if (sc->sc_mii.mii_media_active & IFM_FDX)
1469 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1470 else
1471 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1472 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1473
1474 /*
1475 * There is a multicast filter bug in 10Mbps mode. Kick the
1476 * multicast filter in case the speed changed.
1477 */
1478 epic_set_mchash(sc);
1479
1480 /* XXX Update ifp->if_baudrate */
1481 }
1482
1483 /*
1484 * Callback from ifmedia to request current media status.
1485 */
1486 void
1487 epic_mediastatus(ifp, ifmr)
1488 struct ifnet *ifp;
1489 struct ifmediareq *ifmr;
1490 {
1491 struct epic_softc *sc = ifp->if_softc;
1492
1493 mii_pollstat(&sc->sc_mii);
1494 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1495 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1496 }
1497
1498 /*
1499 * Callback from ifmedia to request new media setting.
1500 */
1501 int
1502 epic_mediachange(ifp)
1503 struct ifnet *ifp;
1504 {
1505 struct epic_softc *sc = ifp->if_softc;
1506
1507 if (ifp->if_flags & IFF_UP)
1508 mii_mediachg(&sc->sc_mii);
1509 return (0);
1510 }
1511