smc83c170.c revision 1.29 1 /* $NetBSD: smc83c170.c,v 1.29 2000/03/23 07:01:32 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #ifdef INET
70 #include <netinet/in.h>
71 #include <netinet/if_inarp.h>
72 #endif
73
74 #ifdef NS
75 #include <netns/ns.h>
76 #include <netns/ns_if.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/mii/miivar.h>
83
84 #include <dev/ic/smc83c170reg.h>
85 #include <dev/ic/smc83c170var.h>
86
87 void epic_start __P((struct ifnet *));
88 void epic_watchdog __P((struct ifnet *));
89 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
90
91 void epic_shutdown __P((void *));
92
93 void epic_reset __P((struct epic_softc *));
94 int epic_init __P((struct epic_softc *));
95 void epic_rxdrain __P((struct epic_softc *));
96 void epic_stop __P((struct epic_softc *, int));
97 int epic_add_rxbuf __P((struct epic_softc *, int));
98 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
99 void epic_set_mchash __P((struct epic_softc *));
100 void epic_fixup_clock_source __P((struct epic_softc *));
101 int epic_mii_read __P((struct device *, int, int));
102 void epic_mii_write __P((struct device *, int, int, int));
103 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
104 void epic_tick __P((void *));
105
106 void epic_statchg __P((struct device *));
107 int epic_mediachange __P((struct ifnet *));
108 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
109
110 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
111 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
112
113 int epic_copy_small = 0;
114
/*
 * epic_attach:
 *
 *	Attach an EPIC interface to the system.  Allocates the shared
 *	control-data area and its DMA map, creates per-descriptor Tx/Rx
 *	DMA maps, reads the station address and product name from the
 *	serial EEPROM, probes the MII bus, and attaches the ifnet.
 *
 *	On any allocation failure, falls through the fail_* labels to
 *	release everything acquired so far, in reverse order.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	/* devname holds 6 EEPROM words (12 bytes) plus a terminating NUL. */
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	/* Map the control data so the host can touch it (coherently). */
	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	bcopy(myea, enaddr, sizeof(myea));

	/*
	 * ...and the device name (6 EEPROM words of ASCII at word 0x2c).
	 * The name is blank-padded; NUL-terminate and trim trailing spaces.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	bcopy(mydevname, devname, sizeof(mydevname));
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no PHY
	 * is found, fall back to a manual "none" media setting.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Fill in the ifnet and hook up our entry points. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
296
/*
 * epic_shutdown:
 *
 *	Shutdown hook.  Make sure the interface is stopped at reboot,
 *	and drain the receive ring while we're at it.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg, 1);
}
308
/*
 * epic_start:	[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the send queue and maps them into the transmit descriptor
 *	ring until the queue drains or the ring fills.  The first
 *	descriptor of the batch is handed to the chip last, to avoid
 *	racing with the transmitter while the chain is being built.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  On any failure below, the packet is put back
		 * on the send queue for a later retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				/* Won't fit in a header mbuf; need a cluster. */
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			/* Flatten the chain into the new single mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the loaded DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves (hence the
		 * minimum frame length, less the CRC the chip appends).
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
484
485 /*
486 * Watchdog timer handler.
487 * [ifnet interface function]
488 */
489 void
490 epic_watchdog(ifp)
491 struct ifnet *ifp;
492 {
493 struct epic_softc *sc = ifp->if_softc;
494
495 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
496 ifp->if_oerrors++;
497
498 (void) epic_init(sc);
499 }
500
/*
 * epic_ioctl:	[ifnet interface function]
 *
 *	Handle control requests from the operator: address assignment,
 *	MTU, interface flags, multicast membership, and media selection.
 *	Runs at splnet(); returns 0 or an errno.
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = epic_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: either adopt our link-level address for the
			 * host part, or program the supplied one into the
			 * interface before reinitializing.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			error = epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
614
/*
 * epic_intr:
 *
 *	Interrupt handler.  Services receive completions/errors, transmit
 *	completions/underruns, and fatal PCI errors, looping until the
 *	chip reports no further active interrupts.  Returns non-zero if
 *	the interrupt was ours (for shared-interrupt dispatch).
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from rxptr until we hit a chip-owned slot. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = rxd->er_rxlength - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				/*
				 * In promiscuous mode, drop packets that
				 * are neither unicast-to-us nor multicast.
				 */
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    memcmp(LLADDR(ifp->if_sadl),
					   eh->ether_dhost,
					   ETHER_ADDR_LEN) != 0 &&
				    ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* Chip still owns it; nothing more has completed. */
			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		/* Reinitialize to recover from the fatal condition. */
		(void) epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
882
883 /*
884 * One second timer, used to tick the MII.
885 */
886 void
887 epic_tick(arg)
888 void *arg;
889 {
890 struct epic_softc *sc = arg;
891 int s;
892
893 s = splnet();
894 mii_tick(&sc->sc_mii);
895 splx(s);
896
897 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
898 }
899
900 /*
901 * Fixup the clock source on the EPIC.
902 */
903 void
904 epic_fixup_clock_source(sc)
905 struct epic_softc *sc;
906 {
907 int i;
908
909 /*
910 * According to SMC Application Note 7-15, the EPIC's clock
911 * source is incorrect following a reset. This manifests itself
912 * as failure to recognize when host software has written to
913 * a register on the EPIC. The appnote recommends issuing at
914 * least 16 consecutive writes to the CLOCK TEST bit to correctly
915 * configure the clock source.
916 */
917 for (i = 0; i < 16; i++)
918 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
919 TEST_CLOCKTEST);
920 }
921
922 /*
923 * Perform a soft reset on the EPIC.
924 */
925 void
926 epic_reset(sc)
927 struct epic_softc *sc;
928 {
929
930 epic_fixup_clock_source(sc);
931
932 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
933 delay(100);
934 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
935 delay(100);
936
937 epic_fixup_clock_source(sc);
938 }
939
/*
 * epic_init:
 *
 *	Initialize the interface: stop any pending I/O, reset the chip,
 *	program the station address and receive filter, set the media,
 *	build the transmit and receive descriptor rings, enable
 *	interrupts, and start the receiver.  Must be called at splnet().
 *	Returns 0 on success or an errno on failure (in which case the
 *	interface is left not running).
 */
int
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers each take one
	 * 16-bit (little-endian) chunk of the address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating mbufs for
	 * any slots that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1100
1101 /*
1102 * Drain the receive queue.
1103 */
1104 void
1105 epic_rxdrain(sc)
1106 struct epic_softc *sc;
1107 {
1108 struct epic_descsoft *ds;
1109 int i;
1110
1111 for (i = 0; i < EPIC_NRXDESC; i++) {
1112 ds = EPIC_DSRX(sc, i);
1113 if (ds->ds_mbuf != NULL) {
1114 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1115 m_freem(ds->ds_mbuf);
1116 ds->ds_mbuf = NULL;
1117 }
1118 }
1119 }
1120
1121 /*
1122 * Stop transmission on the interface.
1123 */
1124 void
1125 epic_stop(sc, drain)
1126 struct epic_softc *sc;
1127 int drain;
1128 {
1129 bus_space_tag_t st = sc->sc_st;
1130 bus_space_handle_t sh = sc->sc_sh;
1131 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1132 struct epic_descsoft *ds;
1133 u_int32_t reg;
1134 int i;
1135
1136 /*
1137 * Stop the one second clock.
1138 */
1139 callout_stop(&sc->sc_mii_callout);
1140
1141 /* Down the MII. */
1142 mii_down(&sc->sc_mii);
1143
1144 /* Paranoia... */
1145 epic_fixup_clock_source(sc);
1146
1147 /*
1148 * Disable interrupts.
1149 */
1150 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1151 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1152 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1153
1154 /*
1155 * Stop the DMA engine and take the receiver off-line.
1156 */
1157 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1158 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1159
1160 /*
1161 * Release any queued transmit buffers.
1162 */
1163 for (i = 0; i < EPIC_NTXDESC; i++) {
1164 ds = EPIC_DSTX(sc, i);
1165 if (ds->ds_mbuf != NULL) {
1166 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1167 m_freem(ds->ds_mbuf);
1168 ds->ds_mbuf = NULL;
1169 }
1170 }
1171
1172 if (drain) {
1173 /*
1174 * Release the receive buffers.
1175 */
1176 epic_rxdrain(sc);
1177 }
1178
1179 /*
1180 * Mark the interface down and cancel the watchdog timer.
1181 */
1182 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1183 ifp->if_timer = 0;
1184 }
1185
1186 /*
1187 * Read the EPIC Serial EEPROM.
1188 */
1189 void
1190 epic_read_eeprom(sc, word, wordcnt, data)
1191 struct epic_softc *sc;
1192 int word, wordcnt;
1193 u_int16_t *data;
1194 {
1195 bus_space_tag_t st = sc->sc_st;
1196 bus_space_handle_t sh = sc->sc_sh;
1197 u_int16_t reg;
1198 int i, x;
1199
1200 #define EEPROM_WAIT_READY(st, sh) \
1201 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1202 /* nothing */
1203
1204 /*
1205 * Enable the EEPROM.
1206 */
1207 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1208 EEPROM_WAIT_READY(st, sh);
1209
1210 for (i = 0; i < wordcnt; i++) {
1211 /* Send CHIP SELECT for one clock tick. */
1212 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1213 EEPROM_WAIT_READY(st, sh);
1214
1215 /* Shift in the READ opcode. */
1216 for (x = 3; x > 0; x--) {
1217 reg = EECTL_ENABLE|EECTL_EECS;
1218 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1219 reg |= EECTL_EEDI;
1220 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1221 EEPROM_WAIT_READY(st, sh);
1222 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1223 EEPROM_WAIT_READY(st, sh);
1224 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1225 EEPROM_WAIT_READY(st, sh);
1226 }
1227
1228 /* Shift in address. */
1229 for (x = 6; x > 0; x--) {
1230 reg = EECTL_ENABLE|EECTL_EECS;
1231 if ((word + i) & (1 << (x - 1)))
1232 reg |= EECTL_EEDI;
1233 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1234 EEPROM_WAIT_READY(st, sh);
1235 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1236 EEPROM_WAIT_READY(st, sh);
1237 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1238 EEPROM_WAIT_READY(st, sh);
1239 }
1240
1241 /* Shift out data. */
1242 reg = EECTL_ENABLE|EECTL_EECS;
1243 data[i] = 0;
1244 for (x = 16; x > 0; x--) {
1245 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1246 EEPROM_WAIT_READY(st, sh);
1247 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1248 data[i] |= (1 << (x - 1));
1249 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1250 EEPROM_WAIT_READY(st, sh);
1251 }
1252
1253 /* Clear CHIP SELECT. */
1254 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1255 EEPROM_WAIT_READY(st, sh);
1256 }
1257
1258 /*
1259 * Disable the EEPROM.
1260 */
1261 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1262
1263 #undef EEPROM_WAIT_READY
1264 }
1265
1266 /*
1267 * Add a receive buffer to the indicated descriptor.
1268 */
1269 int
1270 epic_add_rxbuf(sc, idx)
1271 struct epic_softc *sc;
1272 int idx;
1273 {
1274 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1275 struct mbuf *m;
1276 int error;
1277
1278 MGETHDR(m, M_DONTWAIT, MT_DATA);
1279 if (m == NULL)
1280 return (ENOBUFS);
1281
1282 MCLGET(m, M_DONTWAIT);
1283 if ((m->m_flags & M_EXT) == 0) {
1284 m_freem(m);
1285 return (ENOBUFS);
1286 }
1287
1288 if (ds->ds_mbuf != NULL)
1289 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1290
1291 ds->ds_mbuf = m;
1292
1293 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1294 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1295 if (error) {
1296 printf("%s: can't load rx DMA map %d, error = %d\n",
1297 sc->sc_dev.dv_xname, idx, error);
1298 panic("epic_add_rxbuf"); /* XXX */
1299 }
1300
1301 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1302 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1303
1304 EPIC_INIT_RXDESC(sc, idx);
1305
1306 return (0);
1307 }
1308
1309 /*
1310 * Set the EPIC multicast hash table.
1311 *
1312 * NOTE: We rely on a recently-updated mii_media_active here!
1313 */
1314 void
1315 epic_set_mchash(sc)
1316 struct epic_softc *sc;
1317 {
1318 struct ethercom *ec = &sc->sc_ethercom;
1319 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1320 struct ether_multi *enm;
1321 struct ether_multistep step;
1322 u_int8_t *cp;
1323 u_int32_t crc, mchash[4];
1324 int len;
1325 static const u_int32_t crctab[] = {
1326 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1327 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1328 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1329 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1330 };
1331
1332 /*
1333 * Set up the multicast address filter by passing all multicast
1334 * addresses through a CRC generator, and then using the high-order
1335 * 6 bits as an index into the 64 bit multicast hash table (only
1336 * the lower 16 bits of each 32 bit multicast hash register are
1337 * valid). The high order bit selects the register, while the
1338 * rest of the bits select the bit within the register.
1339 */
1340
1341 if (ifp->if_flags & IFF_PROMISC)
1342 goto allmulti;
1343
1344 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1345 /* XXX hardware bug in 10Mbps mode. */
1346 goto allmulti;
1347 }
1348
1349 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1350
1351 ETHER_FIRST_MULTI(step, ec, enm);
1352 while (enm != NULL) {
1353 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1354 /*
1355 * We must listen to a range of multicast addresses.
1356 * For now, just accept all multicasts, rather than
1357 * trying to set only those filter bits needed to match
1358 * the range. (At this time, the only use of address
1359 * ranges is for IP multicast routing, for which the
1360 * range is big enough to require all bits set.)
1361 */
1362 goto allmulti;
1363 }
1364
1365 cp = enm->enm_addrlo;
1366 crc = 0xffffffff;
1367 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1368 crc ^= *cp++;
1369 crc = (crc >> 4) ^ crctab[crc & 0xf];
1370 crc = (crc >> 4) ^ crctab[crc & 0xf];
1371 }
1372 /* Just want the 6 most significant bits. */
1373 crc >>= 26;
1374
1375 /* Set the corresponding bit in the hash table. */
1376 mchash[crc >> 4] |= 1 << (crc & 0xf);
1377
1378 ETHER_NEXT_MULTI(step, enm);
1379 }
1380
1381 ifp->if_flags &= ~IFF_ALLMULTI;
1382 goto sethash;
1383
1384 allmulti:
1385 ifp->if_flags |= IFF_ALLMULTI;
1386 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1387
1388 sethash:
1389 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1390 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1391 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1392 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1393 }
1394
1395 /*
1396 * Wait for the MII to become ready.
1397 */
1398 int
1399 epic_mii_wait(sc, rw)
1400 struct epic_softc *sc;
1401 u_int32_t rw;
1402 {
1403 int i;
1404
1405 for (i = 0; i < 50; i++) {
1406 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1407 == 0)
1408 break;
1409 delay(2);
1410 }
1411 if (i == 50) {
1412 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1413 return (1);
1414 }
1415
1416 return (0);
1417 }
1418
1419 /*
1420 * Read from the MII.
1421 */
1422 int
1423 epic_mii_read(self, phy, reg)
1424 struct device *self;
1425 int phy, reg;
1426 {
1427 struct epic_softc *sc = (struct epic_softc *)self;
1428
1429 if (epic_mii_wait(sc, MMCTL_WRITE))
1430 return (0);
1431
1432 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1433 MMCTL_ARG(phy, reg, MMCTL_READ));
1434
1435 if (epic_mii_wait(sc, MMCTL_READ))
1436 return (0);
1437
1438 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1439 MMDATA_MASK);
1440 }
1441
1442 /*
1443 * Write to the MII.
1444 */
1445 void
1446 epic_mii_write(self, phy, reg, val)
1447 struct device *self;
1448 int phy, reg, val;
1449 {
1450 struct epic_softc *sc = (struct epic_softc *)self;
1451
1452 if (epic_mii_wait(sc, MMCTL_WRITE))
1453 return;
1454
1455 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1456 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1457 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1458 }
1459
1460 /*
1461 * Callback from PHY when media changes.
1462 */
1463 void
1464 epic_statchg(self)
1465 struct device *self;
1466 {
1467 struct epic_softc *sc = (struct epic_softc *)self;
1468 u_int32_t txcon;
1469
1470 /*
1471 * Update loopback bits in TXCON to reflect duplex mode.
1472 */
1473 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1474 if (sc->sc_mii.mii_media_active & IFM_FDX)
1475 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1476 else
1477 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1478 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1479
1480 /*
1481 * There is a multicast filter bug in 10Mbps mode. Kick the
1482 * multicast filter in case the speed changed.
1483 */
1484 epic_set_mchash(sc);
1485 }
1486
1487 /*
1488 * Callback from ifmedia to request current media status.
1489 */
1490 void
1491 epic_mediastatus(ifp, ifmr)
1492 struct ifnet *ifp;
1493 struct ifmediareq *ifmr;
1494 {
1495 struct epic_softc *sc = ifp->if_softc;
1496
1497 mii_pollstat(&sc->sc_mii);
1498 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1499 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1500 }
1501
1502 /*
1503 * Callback from ifmedia to request new media setting.
1504 */
1505 int
1506 epic_mediachange(ifp)
1507 struct ifnet *ifp;
1508 {
1509 struct epic_softc *sc = ifp->if_softc;
1510
1511 if (ifp->if_flags & IFF_UP)
1512 mii_mediachg(&sc->sc_mii);
1513 return (0);
1514 }
1515