smc83c170.c revision 1.48 1 /* $NetBSD: smc83c170.c,v 1.48 2001/07/23 17:20:04 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "bpfilter.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/callout.h>
50 #include <sys/mbuf.h>
51 #include <sys/malloc.h>
52 #include <sys/kernel.h>
53 #include <sys/socket.h>
54 #include <sys/ioctl.h>
55 #include <sys/errno.h>
56 #include <sys/device.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #include <machine/bus.h>
70 #include <machine/intr.h>
71
72 #include <dev/mii/miivar.h>
73 #include <dev/mii/lxtphyreg.h>
74
75 #include <dev/ic/smc83c170reg.h>
76 #include <dev/ic/smc83c170var.h>
77
/*
 * Forward declarations for the ifnet entry points and internal
 * helpers defined below.  (Old-style __P() prototypes keep this
 * file buildable with pre-ANSI compilers.)
 */
void	epic_start __P((struct ifnet *));
void	epic_watchdog __P((struct ifnet *));
int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));
int	epic_init __P((struct ifnet *));
void	epic_stop __P((struct ifnet *, int));

void	epic_shutdown __P((void *));

void	epic_reset __P((struct epic_softc *));
void	epic_rxdrain __P((struct epic_softc *));
int	epic_add_rxbuf __P((struct epic_softc *, int));
void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));
void	epic_fixup_clock_source __P((struct epic_softc *));
int	epic_mii_read __P((struct device *, int, int));
void	epic_mii_write __P((struct device *, int, int, int));
int	epic_mii_wait __P((struct epic_softc *, u_int32_t));
void	epic_tick __P((void *));

void	epic_statchg __P((struct device *));
int	epic_mediachange __P((struct ifnet *));
void	epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

/*
 * The set of interrupt sources we service: fatal PCI errors, transmit
 * underrun/complete, and receive error/queue-empty/copy-complete.
 */
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/*
 * When non-zero, received packets small enough to fit in an mbuf
 * header are copied out of the receive cluster instead of handing
 * the cluster up the stack (see epic_intr()).  Patchable.
 */
int	epic_copy_small = 0;
105
/*
 * Attach an EPIC interface to the system.
 *
 * Allocates the DMA-able control data (descriptor rings and fraglists),
 * creates the per-descriptor DMA maps, reads the Ethernet address and
 * device name out of the serial EEPROM, probes the MII, and attaches
 * the network interface.  On failure, the fail_* chain at the bottom
 * releases everything allocated so far, in reverse order.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error, miiflags;
	bus_dma_segment_t seg;
	/* devname holds the 12 EEPROM name bytes plus a terminating NUL. */
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.  Each may carry up to
	 * EPIC_NFRAGS segments of a single packet.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps; receive buffers are
	 * always single mbuf clusters (one segment).
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM (stored as three
	 * little-endian 16-bit words starting at word 0).
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (six words at EEPROM word offset 0x2c).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate and strip trailing spaces from the name. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a "none" media instance.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
		    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
		    sc->sc_serinst),
		    0, NULL);
		printf("%s: 10base2/BNC\n", sc->sc_dev.dv_xname);
	} else
		sc->sc_serinst = -1;

	/* Set up the ifnet entry points. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
313
314 /*
315 * Shutdown hook. Make sure the interface is stopped at reboot.
316 */
317 void
318 epic_shutdown(arg)
319 void *arg;
320 {
321 struct epic_softc *sc = arg;
322
323 epic_stop(&sc->sc_ethercom.ec_if, 1);
324 }
325
326 /*
327 * Start packet transmission on the interface.
328 * [ifnet interface function]
329 */
330 void
331 epic_start(ifp)
332 struct ifnet *ifp;
333 {
334 struct epic_softc *sc = ifp->if_softc;
335 struct mbuf *m0, *m;
336 struct epic_txdesc *txd;
337 struct epic_descsoft *ds;
338 struct epic_fraglist *fr;
339 bus_dmamap_t dmamap;
340 int error, firsttx, nexttx, opending, seg;
341
342 /*
343 * Remember the previous txpending and the first transmit
344 * descriptor we use.
345 */
346 opending = sc->sc_txpending;
347 firsttx = EPIC_NEXTTX(sc->sc_txlast);
348
349 /*
350 * Loop through the send queue, setting up transmit descriptors
351 * until we drain the queue, or use up all available transmit
352 * descriptors.
353 */
354 while (sc->sc_txpending < EPIC_NTXDESC) {
355 /*
356 * Grab a packet off the queue.
357 */
358 IFQ_POLL(&ifp->if_snd, m0);
359 if (m0 == NULL)
360 break;
361 m = NULL;
362
363 /*
364 * Get the last and next available transmit descriptor.
365 */
366 nexttx = EPIC_NEXTTX(sc->sc_txlast);
367 txd = EPIC_CDTX(sc, nexttx);
368 fr = EPIC_CDFL(sc, nexttx);
369 ds = EPIC_DSTX(sc, nexttx);
370 dmamap = ds->ds_dmamap;
371
372 /*
373 * Load the DMA map. If this fails, the packet either
374 * didn't fit in the alloted number of frags, or we were
375 * short on resources. In this case, we'll copy and try
376 * again.
377 */
378 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
379 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
380 MGETHDR(m, M_DONTWAIT, MT_DATA);
381 if (m == NULL) {
382 printf("%s: unable to allocate Tx mbuf\n",
383 sc->sc_dev.dv_xname);
384 break;
385 }
386 if (m0->m_pkthdr.len > MHLEN) {
387 MCLGET(m, M_DONTWAIT);
388 if ((m->m_flags & M_EXT) == 0) {
389 printf("%s: unable to allocate Tx "
390 "cluster\n", sc->sc_dev.dv_xname);
391 m_freem(m);
392 break;
393 }
394 }
395 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
396 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
397 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
398 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
399 if (error) {
400 printf("%s: unable to load Tx buffer, "
401 "error = %d\n", sc->sc_dev.dv_xname, error);
402 break;
403 }
404 }
405 IFQ_DEQUEUE(&ifp->if_snd, m0);
406 if (m != NULL) {
407 m_freem(m0);
408 m0 = m;
409 }
410
411 /* Initialize the fraglist. */
412 fr->ef_nfrags = dmamap->dm_nsegs;
413 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
414 fr->ef_frags[seg].ef_addr =
415 dmamap->dm_segs[seg].ds_addr;
416 fr->ef_frags[seg].ef_length =
417 dmamap->dm_segs[seg].ds_len;
418 }
419
420 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
421
422 /* Sync the DMA map. */
423 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
424 BUS_DMASYNC_PREWRITE);
425
426 /*
427 * Store a pointer to the packet so we can free it later.
428 */
429 ds->ds_mbuf = m0;
430
431 /*
432 * Fill in the transmit descriptor. The EPIC doesn't
433 * auto-pad, so we have to do this ourselves.
434 */
435 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
436 txd->et_txlength = max(m0->m_pkthdr.len,
437 ETHER_MIN_LEN - ETHER_CRC_LEN);
438
439 /*
440 * If this is the first descriptor we're enqueueing,
441 * don't give it to the EPIC yet. That could cause
442 * a race condition. We'll do it below.
443 */
444 if (nexttx == firsttx)
445 txd->et_txstatus = 0;
446 else
447 txd->et_txstatus = ET_TXSTAT_OWNER;
448
449 EPIC_CDTXSYNC(sc, nexttx,
450 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
451
452 /* Advance the tx pointer. */
453 sc->sc_txpending++;
454 sc->sc_txlast = nexttx;
455
456 #if NBPFILTER > 0
457 /*
458 * Pass the packet to any BPF listeners.
459 */
460 if (ifp->if_bpf)
461 bpf_mtap(ifp->if_bpf, m0);
462 #endif
463 }
464
465 if (sc->sc_txpending == EPIC_NTXDESC) {
466 /* No more slots left; notify upper layer. */
467 ifp->if_flags |= IFF_OACTIVE;
468 }
469
470 if (sc->sc_txpending != opending) {
471 /*
472 * We enqueued packets. If the transmitter was idle,
473 * reset the txdirty pointer.
474 */
475 if (opending == 0)
476 sc->sc_txdirty = firsttx;
477
478 /*
479 * Cause a transmit interrupt to happen on the
480 * last packet we enqueued.
481 */
482 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
483 EPIC_CDTXSYNC(sc, sc->sc_txlast,
484 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
485
486 /*
487 * The entire packet chain is set up. Give the
488 * first descriptor to the EPIC now.
489 */
490 EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
491 EPIC_CDTXSYNC(sc, firsttx,
492 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
493
494 /* Start the transmitter. */
495 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
496 COMMAND_TXQUEUED);
497
498 /* Set a watchdog timer in case the chip flakes out. */
499 ifp->if_timer = 5;
500 }
501 }
502
503 /*
504 * Watchdog timer handler.
505 * [ifnet interface function]
506 */
507 void
508 epic_watchdog(ifp)
509 struct ifnet *ifp;
510 {
511 struct epic_softc *sc = ifp->if_softc;
512
513 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
514 ifp->if_oerrors++;
515
516 (void) epic_init(ifp);
517 }
518
519 /*
520 * Handle control requests from the operator.
521 * [ifnet interface function]
522 */
523 int
524 epic_ioctl(ifp, cmd, data)
525 struct ifnet *ifp;
526 u_long cmd;
527 caddr_t data;
528 {
529 struct epic_softc *sc = ifp->if_softc;
530 struct ifreq *ifr = (struct ifreq *)data;
531 int s, error;
532
533 s = splnet();
534
535 switch (cmd) {
536 case SIOCSIFMEDIA:
537 case SIOCGIFMEDIA:
538 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
539 break;
540
541 default:
542 error = ether_ioctl(ifp, cmd, data);
543 if (error == ENETRESET) {
544 /*
545 * Multicast list has changed; set the hardware filter
546 * accordingly. Update our idea of the current media;
547 * epic_set_mchash() needs to know what it is.
548 */
549 mii_pollstat(&sc->sc_mii);
550 epic_set_mchash(sc);
551 error = 0;
552 }
553 break;
554 }
555
556 splx(s);
557 return (error);
558 }
559
/*
 * Interrupt handler.
 *
 * Loops reading INTSTAT until no interrupt is pending, servicing
 * receive completions/errors, transmit completions, receive queue
 * underruns, transmit underruns, and fatal PCI errors.  Returns
 * non-zero if the interrupt was ours.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we
	 * service).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from the last processed descriptor. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet.
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					/* Recycle the old buffer in place. */
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			/* Hardware leaves the FCS on; tell the stack. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* Chip still owns it; stop reaping. */
			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		/* Reinitialize the chip to recover. */
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
816
817 /*
818 * One second timer, used to tick the MII.
819 */
820 void
821 epic_tick(arg)
822 void *arg;
823 {
824 struct epic_softc *sc = arg;
825 int s;
826
827 s = splnet();
828 mii_tick(&sc->sc_mii);
829 splx(s);
830
831 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
832 }
833
834 /*
835 * Fixup the clock source on the EPIC.
836 */
837 void
838 epic_fixup_clock_source(sc)
839 struct epic_softc *sc;
840 {
841 int i;
842
843 /*
844 * According to SMC Application Note 7-15, the EPIC's clock
845 * source is incorrect following a reset. This manifests itself
846 * as failure to recognize when host software has written to
847 * a register on the EPIC. The appnote recommends issuing at
848 * least 16 consecutive writes to the CLOCK TEST bit to correctly
849 * configure the clock source.
850 */
851 for (i = 0; i < 16; i++)
852 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
853 TEST_CLOCKTEST);
854 }
855
/*
 * Perform a soft reset on the EPIC.
 *
 * The clock source must be fixed up both before and after the reset
 * (see epic_fixup_clock_source()), otherwise register writes may be
 * silently ignored.
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	/* Clear GENCTL, then pulse the soft-reset bit. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}
873
/*
 * Initialize the interface.  Must be called at splnet().
 * [ifnet interface function]
 *
 * Stops any pending I/O, resets the chip, programs the station
 * address and receive filter, builds the transmit and receive
 * descriptor rings, enables interrupts, and starts the receiver.
 * Returns 0 on success or an errno value on failure.
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (written to the LAN0..LAN2
	 * registers as three little-endian 16-bit words).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any descriptors that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1035
1036 /*
1037 * Drain the receive queue.
1038 */
1039 void
1040 epic_rxdrain(sc)
1041 struct epic_softc *sc;
1042 {
1043 struct epic_descsoft *ds;
1044 int i;
1045
1046 for (i = 0; i < EPIC_NRXDESC; i++) {
1047 ds = EPIC_DSRX(sc, i);
1048 if (ds->ds_mbuf != NULL) {
1049 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1050 m_freem(ds->ds_mbuf);
1051 ds->ds_mbuf = NULL;
1052 }
1053 }
1054 }
1055
/*
 * Stop transmission on the interface.
 * [ifnet interface function]
 *
 * Stops the MII tick callout, disables interrupts, halts the DMA
 * engines, and frees any queued transmit mbufs.  If `disable' is
 * non-zero the receive buffers are drained as well.
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1116
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs `wordcnt' 16-bit words starting at word offset `word'
 * into `data' through the EECTL register: for each word, assert
 * chip select, clock out a 3-bit READ opcode and a 6-bit address
 * (MSB first), then clock in 16 data bits.  Every register write is
 * followed by a busy-wait for EECTL_EERDY.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode (3 bits, MSB first). */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, pulse the clock, drop it. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address (6 bits, MSB first). */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data (16 bits, MSB first). */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1196
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Allocates an mbuf with an external cluster, maps it for device
 * reads, and attaches it to RX descriptor `idx'.  Any mbuf previously
 * attached is unmapped but not freed here -- NOTE(review): the caller
 * is presumed to have taken ownership of the old mbuf (e.g. passed it
 * up the stack); confirm against the RX interrupt path.
 *
 * Returns 0 on success, ENOBUFS if no mbuf or cluster was available.
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	/* Get a packet header mbuf; fail softly if none available. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster so a full-sized frame fits in one buffer. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unmap the previous buffer, if any, before reusing the map. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the whole cluster for device->memory DMA. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* Should not happen with a freshly-allocated cluster. */
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	/* Make the buffer visible to the device before it DMAs into it. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Point the hardware descriptor at the new buffer. */
	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
1240
/*
 * Set the EPIC multicast hash table.
 *
 * Programs the four 16-bit MC0..MC3 hash registers from the current
 * multicast list, or opens the filter completely (all-multi) when the
 * interface is promiscuous, a multicast range is present, or the link
 * is running at 10Mbps (hardware filter bug -- see below).
 *
 * NOTE: We rely on a recently-updated mii_media_active here!
 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hash, mchash[4];

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
		/* XXX hardware bug in 10Mbps mode. */
		goto allmulti;
	}

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	/* Walk the multicast list, setting one hash bit per address. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		/* Top 6 bits of the big-endian CRC index the 64-bit table. */
		hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
		hash >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[hash >> 4] |= 1 << (hash & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	/* Accept all multicast: set every valid (low 16) hash bit. */
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
1311
1312 /*
1313 * Wait for the MII to become ready.
1314 */
1315 int
1316 epic_mii_wait(sc, rw)
1317 struct epic_softc *sc;
1318 u_int32_t rw;
1319 {
1320 int i;
1321
1322 for (i = 0; i < 50; i++) {
1323 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1324 == 0)
1325 break;
1326 delay(2);
1327 }
1328 if (i == 50) {
1329 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1330 return (1);
1331 }
1332
1333 return (0);
1334 }
1335
1336 /*
1337 * Read from the MII.
1338 */
1339 int
1340 epic_mii_read(self, phy, reg)
1341 struct device *self;
1342 int phy, reg;
1343 {
1344 struct epic_softc *sc = (struct epic_softc *)self;
1345
1346 if (epic_mii_wait(sc, MMCTL_WRITE))
1347 return (0);
1348
1349 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1350 MMCTL_ARG(phy, reg, MMCTL_READ));
1351
1352 if (epic_mii_wait(sc, MMCTL_READ))
1353 return (0);
1354
1355 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1356 MMDATA_MASK);
1357 }
1358
1359 /*
1360 * Write to the MII.
1361 */
1362 void
1363 epic_mii_write(self, phy, reg, val)
1364 struct device *self;
1365 int phy, reg, val;
1366 {
1367 struct epic_softc *sc = (struct epic_softc *)self;
1368
1369 if (epic_mii_wait(sc, MMCTL_WRITE))
1370 return;
1371
1372 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1373 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1374 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1375 }
1376
/*
 * Callback from PHY when media changes.
 *
 * Propagates the current duplex setting into the MAC (TXCON loopback
 * bits), drives the duplex LED on boards that need it, and re-programs
 * the multicast filter (which depends on link speed).
 */
void
epic_statchg(self)
	struct device *self;
{
	struct epic_softc *sc = (struct epic_softc *)self;
	u_int32_t txcon, miicfg;

	/*
	 * Update loopback bits in TXCON to reflect duplex mode.
	 */
	txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	else
		txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);

	/* On some cards we need to manually set the full-duplex LED. */
	if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
			miicfg |= MIICFG_ENABLE;
		else
			miicfg &= ~MIICFG_ENABLE;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/*
	 * There is a multicast filter bug in 10Mbps mode.  Kick the
	 * multicast filter in case the speed changed.
	 */
	epic_set_mchash(sc);
}
1413
1414 /*
1415 * Callback from ifmedia to request current media status.
1416 */
1417 void
1418 epic_mediastatus(ifp, ifmr)
1419 struct ifnet *ifp;
1420 struct ifmediareq *ifmr;
1421 {
1422 struct epic_softc *sc = ifp->if_softc;
1423
1424 mii_pollstat(&sc->sc_mii);
1425 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1426 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1427 }
1428
/*
 * Callback from ifmedia to request new media setting.
 *
 * Selects between the EPIC's serial (internal) interface and the
 * external MII, programs the PHYs via mii_mediachg(), and powers
 * the fiber transceiver up or down on fiber-capable PHYs.
 *
 * Always returns 0; does nothing until the interface is IFF_UP.
 */
int
epic_mediachange(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	/* Defer the change until the interface is brought up. */
	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting serial interface, select MII mode */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/* Let the MII layer program the PHYs for the new media. */
	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* select serial interface */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/* Apply duplex/LED/filter settings for the new media. */
		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Lookup selected PHY */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	     miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n"); /* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	       miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to power up/down the fiber transceiver. */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}
1512