smc83c170.c revision 1.24 1 /* $NetBSD: smc83c170.c,v 1.24 2000/02/02 08:05:27 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/mii/miivar.h>
82
83 #include <dev/ic/smc83c170reg.h>
84 #include <dev/ic/smc83c170var.h>
85
/* ifnet interface functions. */
void	epic_start __P((struct ifnet *));
void	epic_watchdog __P((struct ifnet *));
int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));

/* Shutdown hook, established at attach time. */
void	epic_shutdown __P((void *));

/* Internal chip management. */
void	epic_reset __P((struct epic_softc *));
int	epic_init __P((struct epic_softc *));
void	epic_rxdrain __P((struct epic_softc *));
void	epic_stop __P((struct epic_softc *, int));
int	epic_add_rxbuf __P((struct epic_softc *, int));
void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));
void	epic_fixup_clock_source __P((struct epic_softc *));

/* MII bus glue. */
int	epic_mii_read __P((struct device *, int, int));
void	epic_mii_write __P((struct device *, int, int, int));
int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
void	epic_tick __P((void *));

/* ifmedia glue. */
void	epic_statchg __P((struct device *));
int	epic_mediachange __P((struct ifnet *));
void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* Interrupt sources we enable and acknowledge in epic_intr(). */
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/*
 * When non-zero, received packets that fit in a header mbuf are
 * copied instead of swapping in a fresh cluster (see epic_intr()).
 */
int	epic_copy_small = 0;
113
114 /*
115 * Attach an EPIC interface to the system.
116 */
/*
 * Attach an EPIC interface to the system.
 *
 * Allocates and maps the DMA control data area, creates the transmit
 * and receive buffer DMA maps, reads the Ethernet address and device
 * name from the serial EEPROM, probes the MII bus for PHYs, and
 * attaches the network interface.  On failure, everything acquired so
 * far is released by falling through the fail_* labels in reverse
 * order of acquisition.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	bcopy(myea, enaddr, sizeof(myea));

	/*
	 * ...and the device name (6 16-bit words starting at EEPROM
	 * word 0x2c; trailing blanks are trimmed below).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	bcopy(mydevname, devname, sizeof(mydevname));
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a "none" media instance.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
294
295 /*
296 * Shutdown hook. Make sure the interface is stopped at reboot.
297 */
/*
 * Shutdown hook.  Make sure the interface is stopped at reboot,
 * draining the receive buffers as well.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg, 1);
}
306
307 /*
308 * Start packet transmission on the interface.
309 * [ifnet interface function]
310 */
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Dequeues packets from the interface send queue and maps them onto
 * the transmit descriptor ring until the queue drains or the ring
 * fills.  The first descriptor enqueued in this call is handed to the
 * chip only after the whole chain is set up, to avoid racing the
 * transmit DMA engine.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy into a
		 * single contiguous mbuf (cluster) and try again.  On
		 * any failure the packet is prepended back onto the
		 * send queue for a later retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves by
		 * requesting at least the minimum Ethernet frame
		 * length (sans CRC, which the chip appends).
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
482
483 /*
484 * Watchdog timer handler.
485 * [ifnet interface function]
486 */
487 void
488 epic_watchdog(ifp)
489 struct ifnet *ifp;
490 {
491 struct epic_softc *sc = ifp->if_softc;
492
493 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
494 ifp->if_oerrors++;
495
496 (void) epic_init(sc);
497 }
498
499 /*
500 * Handle control requests from the operator.
501 * [ifnet interface function]
502 */
/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 *
 * Runs at splnet().  Returns 0 on success or an errno value.
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = epic_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			error = epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
612
613 /*
614 * Interrupt handler.
615 */
/*
 * Interrupt handler.
 *
 * Loops until the chip reports no active interrupt, servicing
 * receive completions/errors, transmit completions/underruns, and
 * fatal PCI errors.  Returns non-zero if the interrupt was ours
 * (so shared-interrupt dispatch can claim it).
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we
	 * service).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from rxptr until we hit a chip-owned descriptor. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = rxd->er_rxlength - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
					 ETHER_ADDR_LEN) != 0 &&
				    (rxd->er_rxstatus &
				     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST)) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		/* Reinitialize the chip to recover. */
		(void) epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
880
881 /*
882 * One second timer, used to tick the MII.
883 */
884 void
885 epic_tick(arg)
886 void *arg;
887 {
888 struct epic_softc *sc = arg;
889 int s;
890
891 s = splnet();
892 mii_tick(&sc->sc_mii);
893 splx(s);
894
895 timeout(epic_tick, sc, hz);
896 }
897
898 /*
899 * Fixup the clock source on the EPIC.
900 */
901 void
902 epic_fixup_clock_source(sc)
903 struct epic_softc *sc;
904 {
905 int i;
906
907 /*
908 * According to SMC Application Note 7-15, the EPIC's clock
909 * source is incorrect following a reset. This manifests itself
910 * as failure to recognize when host software has written to
911 * a register on the EPIC. The appnote recommends issuing at
912 * least 16 consecutive writes to the CLOCK TEST bit to correctly
913 * configure the clock source.
914 */
915 for (i = 0; i < 16; i++)
916 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
917 TEST_CLOCKTEST);
918 }
919
920 /*
921 * Perform a soft reset on the EPIC.
922 */
/*
 * Perform a soft reset on the EPIC.
 *
 * The clock-source fixup is applied both before and after the reset,
 * since the reset itself re-introduces the bad clock source (see
 * epic_fixup_clock_source()).
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	/* Clear GENCTL, then pulse the soft-reset bit. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}
937
938 /*
939 * Initialize the interface. Must be called at splnet().
940 */
/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Stops and resets the chip, programs the station address and receive
 * filter, (re)builds the transmit and receive descriptor rings, hands
 * them to the chip, enables interrupts, and starts the receiver and
 * the one-second MII tick.  Returns 0 on success or an errno value.
 */
int
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address, two bytes per LAN address
	 * register (low byte first).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating mbufs for
	 * any descriptor that doesn't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	timeout(epic_tick, sc, hz);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1098
1099 /*
1100 * Drain the receive queue.
1101 */
1102 void
1103 epic_rxdrain(sc)
1104 struct epic_softc *sc;
1105 {
1106 struct epic_descsoft *ds;
1107 int i;
1108
1109 for (i = 0; i < EPIC_NRXDESC; i++) {
1110 ds = EPIC_DSRX(sc, i);
1111 if (ds->ds_mbuf != NULL) {
1112 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1113 m_freem(ds->ds_mbuf);
1114 ds->ds_mbuf = NULL;
1115 }
1116 }
1117 }
1118
1119 /*
1120 * Stop transmission on the interface.
1121 */
/*
 * Stop transmission on the interface.
 *
 * Cancels the MII tick, disables interrupts, halts the DMA engines,
 * and frees all queued transmit buffers.  If `drain' is non-zero the
 * receive buffers are released as well (used at shutdown/detach; on
 * a plain reinit they are kept so epic_init() can reuse them).
 */
void
epic_stop(sc, drain)
	struct epic_softc *sc;
	int drain;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	untimeout(epic_tick, sc);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.
		 */
		epic_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1183
1184 /*
1185 * Read the EPIC Serial EEPROM.
1186 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

/* Busy-wait until the EEPROM controller reports ready (EECTL_EERDY). */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	/*
	 * Read `wordcnt' 16-bit words starting at word offset `word',
	 * bit-banging the serial EEPROM protocol through EECTL.
	 */
	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode. */
		/* 3 opcode bits, most significant bit first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, then pulse the clock. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address. */
		/* 6 address bits, most significant bit first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data. */
		/* Clock out 16 data bits, MSB first, sampling EEDO. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1263
1264 /*
1265 * Add a receive buffer to the indicated descriptor.
1266 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header; fail softly if the pool is empty. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster to hold a full-sized frame. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * If the descriptor already had a buffer (i.e. we are recycling
	 * the slot), unload its old DMA mapping first.
	 */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the entire cluster for device reads. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	/* Make the buffer visible to the device before handing it over. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Give the descriptor back to the chip. */
	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
1306
1307 /*
1308 * Set the EPIC multicast hash table.
1309 *
1310 * NOTE: We rely on a recently-updated mii_media_active here!
1311 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[4];
	int len;
	/*
	 * Nibble-at-a-time lookup table for the reflected Ethernet
	 * CRC-32 (polynomial 0xedb88320); two table lookups per byte.
	 */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bit selects the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		/* CRC the address, low nibble then high nibble per byte. */
		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the hash table. */
		/* Top 2 bits pick the register, low 4 pick the bit. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	/* Accept all multicast: set every valid filter bit. */
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
1392
1393 /*
1394 * Wait for the MII to become ready.
1395 */
1396 int
1397 epic_mii_wait(sc, rw)
1398 struct epic_softc *sc;
1399 u_int32_t rw;
1400 {
1401 int i;
1402
1403 for (i = 0; i < 50; i++) {
1404 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1405 == 0)
1406 break;
1407 delay(2);
1408 }
1409 if (i == 50) {
1410 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1411 return (1);
1412 }
1413
1414 return (0);
1415 }
1416
1417 /*
1418 * Read from the MII.
1419 */
1420 int
1421 epic_mii_read(self, phy, reg)
1422 struct device *self;
1423 int phy, reg;
1424 {
1425 struct epic_softc *sc = (struct epic_softc *)self;
1426
1427 if (epic_mii_wait(sc, MMCTL_WRITE))
1428 return (0);
1429
1430 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1431 MMCTL_ARG(phy, reg, MMCTL_READ));
1432
1433 if (epic_mii_wait(sc, MMCTL_READ))
1434 return (0);
1435
1436 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1437 MMDATA_MASK);
1438 }
1439
1440 /*
1441 * Write to the MII.
1442 */
1443 void
1444 epic_mii_write(self, phy, reg, val)
1445 struct device *self;
1446 int phy, reg, val;
1447 {
1448 struct epic_softc *sc = (struct epic_softc *)self;
1449
1450 if (epic_mii_wait(sc, MMCTL_WRITE))
1451 return;
1452
1453 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1454 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1455 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1456 }
1457
1458 /*
1459 * Callback from PHY when media changes.
1460 */
1461 void
1462 epic_statchg(self)
1463 struct device *self;
1464 {
1465 struct epic_softc *sc = (struct epic_softc *)self;
1466 u_int32_t txcon;
1467
1468 /*
1469 * Update loopback bits in TXCON to reflect duplex mode.
1470 */
1471 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1472 if (sc->sc_mii.mii_media_active & IFM_FDX)
1473 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1474 else
1475 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1476 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1477
1478 /*
1479 * There is a multicast filter bug in 10Mbps mode. Kick the
1480 * multicast filter in case the speed changed.
1481 */
1482 epic_set_mchash(sc);
1483
1484 /* XXX Update ifp->if_baudrate */
1485 }
1486
1487 /*
1488 * Callback from ifmedia to request current media status.
1489 */
1490 void
1491 epic_mediastatus(ifp, ifmr)
1492 struct ifnet *ifp;
1493 struct ifmediareq *ifmr;
1494 {
1495 struct epic_softc *sc = ifp->if_softc;
1496
1497 mii_pollstat(&sc->sc_mii);
1498 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1499 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1500 }
1501
1502 /*
1503 * Callback from ifmedia to request new media setting.
1504 */
1505 int
1506 epic_mediachange(ifp)
1507 struct ifnet *ifp;
1508 {
1509 struct epic_softc *sc = ifp->if_softc;
1510
1511 if (ifp->if_flags & IFF_UP)
1512 mii_mediachg(&sc->sc_mii);
1513 return (0);
1514 }
1515