/*	$NetBSD: smc83c170.c,v 1.49 2001/11/13 13:14:44 lukem Exp $	*/
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: smc83c170.c,v 1.49 2001/11/13 13:14:44 lukem Exp $");
47
48 #include "bpfilter.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/callout.h>
53 #include <sys/mbuf.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/socket.h>
57 #include <sys/ioctl.h>
58 #include <sys/errno.h>
59 #include <sys/device.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #include <net/if.h>
64 #include <net/if_dl.h>
65 #include <net/if_media.h>
66 #include <net/if_ether.h>
67
68 #if NBPFILTER > 0
69 #include <net/bpf.h>
70 #endif
71
72 #include <machine/bus.h>
73 #include <machine/intr.h>
74
75 #include <dev/mii/miivar.h>
76 #include <dev/mii/lxtphyreg.h>
77
78 #include <dev/ic/smc83c170reg.h>
79 #include <dev/ic/smc83c170var.h>
80
81 void epic_start __P((struct ifnet *));
82 void epic_watchdog __P((struct ifnet *));
83 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
84 int epic_init __P((struct ifnet *));
85 void epic_stop __P((struct ifnet *, int));
86
87 void epic_shutdown __P((void *));
88
89 void epic_reset __P((struct epic_softc *));
90 void epic_rxdrain __P((struct epic_softc *));
91 int epic_add_rxbuf __P((struct epic_softc *, int));
92 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
93 void epic_set_mchash __P((struct epic_softc *));
94 void epic_fixup_clock_source __P((struct epic_softc *));
95 int epic_mii_read __P((struct device *, int, int));
96 void epic_mii_write __P((struct device *, int, int, int));
97 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
98 void epic_tick __P((void *));
99
100 void epic_statchg __P((struct device *));
101 int epic_mediachange __P((struct ifnet *));
102 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
103
104 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
105 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
106
107 int epic_copy_small = 0;
108
/*
 * epic_attach:
 *
 *	Attach an EPIC interface to the system.
 *
 *	Allocates and maps the DMA control data, creates the Tx/Rx buffer
 *	DMA maps, reads the Ethernet address and board name from the
 *	serial EEPROM, probes the MII bus, and attaches the network
 *	interface.  The bus front-end must have initialized sc_st, sc_sh,
 *	sc_dmat and sc_hwflags before calling this.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error, miiflags;
	bus_dma_segment_t seg;
	/* devname holds the 12 EEPROM name bytes plus a terminating NUL. */
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps (up to EPIC_NFRAGS
	 * segments per packet).
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one cluster-sized
	 * segment each).
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  Each 16-bit
	 * EEPROM word holds two address bytes, low byte first.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (12 ASCII bytes starting at EEPROM
	 * word 0x2c), unpacked the same way.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2] = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate, then strip any trailing blank padding. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a "none" media instance.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
					 sc->sc_serinst),
			    0, NULL);
		printf("%s: 10base2/BNC\n", sc->sc_dev.dv_xname);
	} else
		sc->sc_serinst = -1;

	/* Fill in the ifnet and attach the interface. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
316
317 /*
318 * Shutdown hook. Make sure the interface is stopped at reboot.
319 */
320 void
321 epic_shutdown(arg)
322 void *arg;
323 {
324 struct epic_softc *sc = arg;
325
326 epic_stop(&sc->sc_ethercom.ec_if, 1);
327 }
328
329 /*
330 * Start packet transmission on the interface.
331 * [ifnet interface function]
332 */
333 void
334 epic_start(ifp)
335 struct ifnet *ifp;
336 {
337 struct epic_softc *sc = ifp->if_softc;
338 struct mbuf *m0, *m;
339 struct epic_txdesc *txd;
340 struct epic_descsoft *ds;
341 struct epic_fraglist *fr;
342 bus_dmamap_t dmamap;
343 int error, firsttx, nexttx, opending, seg;
344
345 /*
346 * Remember the previous txpending and the first transmit
347 * descriptor we use.
348 */
349 opending = sc->sc_txpending;
350 firsttx = EPIC_NEXTTX(sc->sc_txlast);
351
352 /*
353 * Loop through the send queue, setting up transmit descriptors
354 * until we drain the queue, or use up all available transmit
355 * descriptors.
356 */
357 while (sc->sc_txpending < EPIC_NTXDESC) {
358 /*
359 * Grab a packet off the queue.
360 */
361 IFQ_POLL(&ifp->if_snd, m0);
362 if (m0 == NULL)
363 break;
364 m = NULL;
365
366 /*
367 * Get the last and next available transmit descriptor.
368 */
369 nexttx = EPIC_NEXTTX(sc->sc_txlast);
370 txd = EPIC_CDTX(sc, nexttx);
371 fr = EPIC_CDFL(sc, nexttx);
372 ds = EPIC_DSTX(sc, nexttx);
373 dmamap = ds->ds_dmamap;
374
375 /*
376 * Load the DMA map. If this fails, the packet either
377 * didn't fit in the alloted number of frags, or we were
378 * short on resources. In this case, we'll copy and try
379 * again.
380 */
381 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
382 BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
383 MGETHDR(m, M_DONTWAIT, MT_DATA);
384 if (m == NULL) {
385 printf("%s: unable to allocate Tx mbuf\n",
386 sc->sc_dev.dv_xname);
387 break;
388 }
389 if (m0->m_pkthdr.len > MHLEN) {
390 MCLGET(m, M_DONTWAIT);
391 if ((m->m_flags & M_EXT) == 0) {
392 printf("%s: unable to allocate Tx "
393 "cluster\n", sc->sc_dev.dv_xname);
394 m_freem(m);
395 break;
396 }
397 }
398 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
399 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
400 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
401 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
402 if (error) {
403 printf("%s: unable to load Tx buffer, "
404 "error = %d\n", sc->sc_dev.dv_xname, error);
405 break;
406 }
407 }
408 IFQ_DEQUEUE(&ifp->if_snd, m0);
409 if (m != NULL) {
410 m_freem(m0);
411 m0 = m;
412 }
413
414 /* Initialize the fraglist. */
415 fr->ef_nfrags = dmamap->dm_nsegs;
416 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
417 fr->ef_frags[seg].ef_addr =
418 dmamap->dm_segs[seg].ds_addr;
419 fr->ef_frags[seg].ef_length =
420 dmamap->dm_segs[seg].ds_len;
421 }
422
423 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
424
425 /* Sync the DMA map. */
426 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
427 BUS_DMASYNC_PREWRITE);
428
429 /*
430 * Store a pointer to the packet so we can free it later.
431 */
432 ds->ds_mbuf = m0;
433
434 /*
435 * Fill in the transmit descriptor. The EPIC doesn't
436 * auto-pad, so we have to do this ourselves.
437 */
438 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
439 txd->et_txlength = max(m0->m_pkthdr.len,
440 ETHER_MIN_LEN - ETHER_CRC_LEN);
441
442 /*
443 * If this is the first descriptor we're enqueueing,
444 * don't give it to the EPIC yet. That could cause
445 * a race condition. We'll do it below.
446 */
447 if (nexttx == firsttx)
448 txd->et_txstatus = 0;
449 else
450 txd->et_txstatus = ET_TXSTAT_OWNER;
451
452 EPIC_CDTXSYNC(sc, nexttx,
453 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
454
455 /* Advance the tx pointer. */
456 sc->sc_txpending++;
457 sc->sc_txlast = nexttx;
458
459 #if NBPFILTER > 0
460 /*
461 * Pass the packet to any BPF listeners.
462 */
463 if (ifp->if_bpf)
464 bpf_mtap(ifp->if_bpf, m0);
465 #endif
466 }
467
468 if (sc->sc_txpending == EPIC_NTXDESC) {
469 /* No more slots left; notify upper layer. */
470 ifp->if_flags |= IFF_OACTIVE;
471 }
472
473 if (sc->sc_txpending != opending) {
474 /*
475 * We enqueued packets. If the transmitter was idle,
476 * reset the txdirty pointer.
477 */
478 if (opending == 0)
479 sc->sc_txdirty = firsttx;
480
481 /*
482 * Cause a transmit interrupt to happen on the
483 * last packet we enqueued.
484 */
485 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
486 EPIC_CDTXSYNC(sc, sc->sc_txlast,
487 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
488
489 /*
490 * The entire packet chain is set up. Give the
491 * first descriptor to the EPIC now.
492 */
493 EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
494 EPIC_CDTXSYNC(sc, firsttx,
495 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
496
497 /* Start the transmitter. */
498 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
499 COMMAND_TXQUEUED);
500
501 /* Set a watchdog timer in case the chip flakes out. */
502 ifp->if_timer = 5;
503 }
504 }
505
506 /*
507 * Watchdog timer handler.
508 * [ifnet interface function]
509 */
510 void
511 epic_watchdog(ifp)
512 struct ifnet *ifp;
513 {
514 struct epic_softc *sc = ifp->if_softc;
515
516 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
517 ifp->if_oerrors++;
518
519 (void) epic_init(ifp);
520 }
521
522 /*
523 * Handle control requests from the operator.
524 * [ifnet interface function]
525 */
526 int
527 epic_ioctl(ifp, cmd, data)
528 struct ifnet *ifp;
529 u_long cmd;
530 caddr_t data;
531 {
532 struct epic_softc *sc = ifp->if_softc;
533 struct ifreq *ifr = (struct ifreq *)data;
534 int s, error;
535
536 s = splnet();
537
538 switch (cmd) {
539 case SIOCSIFMEDIA:
540 case SIOCGIFMEDIA:
541 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
542 break;
543
544 default:
545 error = ether_ioctl(ifp, cmd, data);
546 if (error == ENETRESET) {
547 /*
548 * Multicast list has changed; set the hardware filter
549 * accordingly. Update our idea of the current media;
550 * epic_set_mchash() needs to know what it is.
551 */
552 mii_pollstat(&sc->sc_mii);
553 epic_set_mchash(sc);
554 error = 0;
555 }
556 break;
557 }
558
559 splx(s);
560 return (error);
561 }
562
/*
 * epic_intr:
 *
 *	Interrupt handler.  Loops until the chip no longer asserts
 *	INTSTAT_INT_ACTV, servicing receive, transmit-complete, and
 *	fatal-error conditions.  Returns non-zero if the interrupt
 *	was ours (for shared-interrupt dispatch).
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we
	 * are about to service).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from rxptr until we find a chip-owned slot. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * er_rxlength counts it (M_HASFCS is set below).
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			/* FCS is still attached; let the stack strip it. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* Chip still owns it; stop reclaiming. */
			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.  These indicate a PCI bus
	 * problem; report which one and reinitialize the chip.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
819
820 /*
821 * One second timer, used to tick the MII.
822 */
823 void
824 epic_tick(arg)
825 void *arg;
826 {
827 struct epic_softc *sc = arg;
828 int s;
829
830 s = splnet();
831 mii_tick(&sc->sc_mii);
832 splx(s);
833
834 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
835 }
836
837 /*
838 * Fixup the clock source on the EPIC.
839 */
840 void
841 epic_fixup_clock_source(sc)
842 struct epic_softc *sc;
843 {
844 int i;
845
846 /*
847 * According to SMC Application Note 7-15, the EPIC's clock
848 * source is incorrect following a reset. This manifests itself
849 * as failure to recognize when host software has written to
850 * a register on the EPIC. The appnote recommends issuing at
851 * least 16 consecutive writes to the CLOCK TEST bit to correctly
852 * configure the clock source.
853 */
854 for (i = 0; i < 16; i++)
855 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
856 TEST_CLOCKTEST);
857 }
858
/*
 * epic_reset:
 *
 *	Perform a soft reset on the EPIC.  The clock source must be
 *	fixed up both before and after the reset (see
 *	epic_fixup_clock_source()), otherwise register writes may be
 *	lost.
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	/* Clear GENCTL, then pulse the soft-reset bit. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}
876
/*
 * epic_init:
 *
 *	Initialize the interface.  Must be called at splnet().
 *	[ifnet interface function]
 *
 *	Stops any pending I/O, resets the chip, programs the station
 *	address and receive filter, builds the Tx/Rx descriptor rings,
 *	enables interrupts, and starts the receiver and the one-second
 *	MII tick.  Returns 0 on success or an errno on failure.
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address; the LAN registers each take
	 * two bytes, low byte in the low half of the word.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		/* Each descriptor points at its fraglist, and the ring
		 * is circularly linked via et_nextdesc. */
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating fresh
	 * mbuf clusters where needed.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		} else
			EPIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1038
1039 /*
1040 * Drain the receive queue.
1041 */
1042 void
1043 epic_rxdrain(sc)
1044 struct epic_softc *sc;
1045 {
1046 struct epic_descsoft *ds;
1047 int i;
1048
1049 for (i = 0; i < EPIC_NRXDESC; i++) {
1050 ds = EPIC_DSRX(sc, i);
1051 if (ds->ds_mbuf != NULL) {
1052 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1053 m_freem(ds->ds_mbuf);
1054 ds->ds_mbuf = NULL;
1055 }
1056 }
1057 }
1058
/*
 * epic_stop:
 *
 *	Stop transmission on the interface.
 *	[ifnet interface function]
 *
 *	Halts the MII tick, disables interrupts, stops the DMA engines,
 *	and releases all queued transmit buffers.  If "disable" is set,
 *	the receive buffers are drained as well.
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1119
/*
 * epic_read_eeprom:
 *
 *	Read "wordcnt" 16-bit words from the EPIC's serial EEPROM,
 *	starting at word offset "word", into "data".
 *
 *	The EEPROM is bit-banged through the EECTL register: for each
 *	word we raise chip select, clock out the 3-bit READ opcode and
 *	a 6-bit word address (MSB first), then clock in 16 data bits
 *	(MSB first).  Every EECTL write is followed by a busy-wait for
 *	the EERDY bit.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/* Spin until the EEPROM interface reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode (3 bits, MSB first). */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Set data, pulse clock high, drop clock. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address (6 bits, MSB first). */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data (16 bits, MSB first). */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1199
1200 /*
1201 * Add a receive buffer to the indicated descriptor.
1202 */
1203 int
1204 epic_add_rxbuf(sc, idx)
1205 struct epic_softc *sc;
1206 int idx;
1207 {
1208 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1209 struct mbuf *m;
1210 int error;
1211
1212 MGETHDR(m, M_DONTWAIT, MT_DATA);
1213 if (m == NULL)
1214 return (ENOBUFS);
1215
1216 MCLGET(m, M_DONTWAIT);
1217 if ((m->m_flags & M_EXT) == 0) {
1218 m_freem(m);
1219 return (ENOBUFS);
1220 }
1221
1222 if (ds->ds_mbuf != NULL)
1223 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1224
1225 ds->ds_mbuf = m;
1226
1227 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1228 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1229 BUS_DMA_READ|BUS_DMA_NOWAIT);
1230 if (error) {
1231 printf("%s: can't load rx DMA map %d, error = %d\n",
1232 sc->sc_dev.dv_xname, idx, error);
1233 panic("epic_add_rxbuf"); /* XXX */
1234 }
1235
1236 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1237 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1238
1239 EPIC_INIT_RXDESC(sc, idx);
1240
1241 return (0);
1242 }
1243
1244 /*
1245 * Set the EPIC multicast hash table.
1246 *
1247 * NOTE: We rely on a recently-updated mii_media_active here!
1248 */
1249 void
1250 epic_set_mchash(sc)
1251 struct epic_softc *sc;
1252 {
1253 struct ethercom *ec = &sc->sc_ethercom;
1254 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1255 struct ether_multi *enm;
1256 struct ether_multistep step;
1257 u_int32_t hash, mchash[4];
1258
1259 /*
1260 * Set up the multicast address filter by passing all multicast
1261 * addresses through a CRC generator, and then using the low-order
1262 * 6 bits as an index into the 64 bit multicast hash table (only
1263 * the lower 16 bits of each 32 bit multicast hash register are
1264 * valid). The high order bits select the register, while the
1265 * rest of the bits select the bit within the register.
1266 */
1267
1268 if (ifp->if_flags & IFF_PROMISC)
1269 goto allmulti;
1270
1271 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1272 /* XXX hardware bug in 10Mbps mode. */
1273 goto allmulti;
1274 }
1275
1276 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1277
1278 ETHER_FIRST_MULTI(step, ec, enm);
1279 while (enm != NULL) {
1280 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1281 /*
1282 * We must listen to a range of multicast addresses.
1283 * For now, just accept all multicasts, rather than
1284 * trying to set only those filter bits needed to match
1285 * the range. (At this time, the only use of address
1286 * ranges is for IP multicast routing, for which the
1287 * range is big enough to require all bits set.)
1288 */
1289 goto allmulti;
1290 }
1291
1292 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1293 hash >>= 26;
1294
1295 /* Set the corresponding bit in the hash table. */
1296 mchash[hash >> 4] |= 1 << (hash & 0xf);
1297
1298 ETHER_NEXT_MULTI(step, enm);
1299 }
1300
1301 ifp->if_flags &= ~IFF_ALLMULTI;
1302 goto sethash;
1303
1304 allmulti:
1305 ifp->if_flags |= IFF_ALLMULTI;
1306 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1307
1308 sethash:
1309 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1310 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1311 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1312 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1313 }
1314
1315 /*
1316 * Wait for the MII to become ready.
1317 */
1318 int
1319 epic_mii_wait(sc, rw)
1320 struct epic_softc *sc;
1321 u_int32_t rw;
1322 {
1323 int i;
1324
1325 for (i = 0; i < 50; i++) {
1326 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1327 == 0)
1328 break;
1329 delay(2);
1330 }
1331 if (i == 50) {
1332 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1333 return (1);
1334 }
1335
1336 return (0);
1337 }
1338
1339 /*
1340 * Read from the MII.
1341 */
1342 int
1343 epic_mii_read(self, phy, reg)
1344 struct device *self;
1345 int phy, reg;
1346 {
1347 struct epic_softc *sc = (struct epic_softc *)self;
1348
1349 if (epic_mii_wait(sc, MMCTL_WRITE))
1350 return (0);
1351
1352 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1353 MMCTL_ARG(phy, reg, MMCTL_READ));
1354
1355 if (epic_mii_wait(sc, MMCTL_READ))
1356 return (0);
1357
1358 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1359 MMDATA_MASK);
1360 }
1361
1362 /*
1363 * Write to the MII.
1364 */
1365 void
1366 epic_mii_write(self, phy, reg, val)
1367 struct device *self;
1368 int phy, reg, val;
1369 {
1370 struct epic_softc *sc = (struct epic_softc *)self;
1371
1372 if (epic_mii_wait(sc, MMCTL_WRITE))
1373 return;
1374
1375 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1376 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1377 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1378 }
1379
1380 /*
1381 * Callback from PHY when media changes.
1382 */
1383 void
1384 epic_statchg(self)
1385 struct device *self;
1386 {
1387 struct epic_softc *sc = (struct epic_softc *)self;
1388 u_int32_t txcon, miicfg;
1389
1390 /*
1391 * Update loopback bits in TXCON to reflect duplex mode.
1392 */
1393 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1394 if (sc->sc_mii.mii_media_active & IFM_FDX)
1395 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1396 else
1397 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1398 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1399
1400 /* On some cards we need manualy set fullduplex led */
1401 if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1402 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1403 if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1404 miicfg |= MIICFG_ENABLE;
1405 else
1406 miicfg &= ~MIICFG_ENABLE;
1407 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1408 }
1409
1410 /*
1411 * There is a multicast filter bug in 10Mbps mode. Kick the
1412 * multicast filter in case the speed changed.
1413 */
1414 epic_set_mchash(sc);
1415 }
1416
1417 /*
1418 * Callback from ifmedia to request current media status.
1419 */
1420 void
1421 epic_mediastatus(ifp, ifmr)
1422 struct ifnet *ifp;
1423 struct ifmediareq *ifmr;
1424 {
1425 struct epic_softc *sc = ifp->if_softc;
1426
1427 mii_pollstat(&sc->sc_mii);
1428 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1429 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1430 }
1431
/*
 * Callback from ifmedia to request new media setting.
 *
 * Switches the chip between its serial (fiber) interface and the
 * normal MII, and powers fiber transceivers up/down when the selected
 * PHY is a fiber part.  Always returns 0.
 */
int
epic_mediachange(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	struct ifmedia *ifm = &mii->mii_media;
	int media = ifm->ifm_cur->ifm_media;
	u_int32_t miicfg;
	struct mii_softc *miisc;
	int cfg;

	/* Nothing to program until the interface is brought up. */
	if (!(ifp->if_flags & IFF_UP))
		return (0);

	if (IFM_INST(media) != sc->sc_serinst) {
		/* If we're not selecting serial interface, select MII mode */
#ifdef EPICMEDIADEBUG
		printf("%s: parallel mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg &= ~MIICFG_SERMODEENA;
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
	}

	/* Let the MII layer program the PHY for the selected media. */
	mii_mediachg(mii);

	if (IFM_INST(media) == sc->sc_serinst) {
		/* select serial interface */
#ifdef EPICMEDIADEBUG
		printf("%s: serial mode\n", ifp->if_xname);
#endif
		miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
		miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);

		/* There is no driver to fill this */
		mii->mii_media_active = media;
		mii->mii_media_status = 0;

		/* Propagate duplex/filter settings for the new media. */
		epic_statchg(&sc->sc_dev);
		return (0);
	}

	/* Lookup selected PHY */
	for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
	     miisc = LIST_NEXT(miisc, mii_list)) {
		if (IFM_INST(media) == miisc->mii_inst)
			break;
	}
	if (!miisc) {
		printf("epic_mediachange: can't happen\n"); /* ??? panic */
		return (0);
	}
#ifdef EPICMEDIADEBUG
	printf("%s: using phy %s\n", ifp->if_xname,
	       miisc->mii_dev.dv_xname);
#endif

	if (miisc->mii_flags & MIIF_HAVEFIBER) {
		/* XXX XXX assume it's a Level1 - should check */

		/* We have to powerup fiber tranceivers */
		cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
		if (IFM_SUBTYPE(media) == IFM_100_FX) {
#ifdef EPICMEDIADEBUG
			printf("%s: power up fiber\n", ifp->if_xname);
#endif
			cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
		} else {
#ifdef EPICMEDIADEBUG
			printf("%s: power down fiber\n", ifp->if_xname);
#endif
			cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
		}
		PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
	}

	return (0);
}
1515