smc83c170.c revision 1.43 1 /* $NetBSD: smc83c170.c,v 1.43 2001/05/17 17:32:47 drochner Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_ether.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70
71 #ifdef INET
72 #include <netinet/in.h>
73 #include <netinet/if_inarp.h>
74 #endif
75
76 #ifdef NS
77 #include <netns/ns.h>
78 #include <netns/ns_if.h>
79 #endif
80
81 #include <machine/bus.h>
82 #include <machine/intr.h>
83
84 #include <dev/mii/miivar.h>
85 #include <dev/mii/lxtphyreg.h>
86
87 #include <dev/ic/smc83c170reg.h>
88 #include <dev/ic/smc83c170var.h>
89
/* ifnet interface functions. */
void epic_start __P((struct ifnet *));
void epic_watchdog __P((struct ifnet *));
int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
int epic_init __P((struct ifnet *));
void epic_stop __P((struct ifnet *, int));

/* Shutdown hook, established at attach time. */
void epic_shutdown __P((void *));

/* Internal helpers: chip reset, ring maintenance, EEPROM and MII access. */
void epic_reset __P((struct epic_softc *));
void epic_rxdrain __P((struct epic_softc *));
int epic_add_rxbuf __P((struct epic_softc *, int));
void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void epic_set_mchash __P((struct epic_softc *));
void epic_fixup_clock_source __P((struct epic_softc *));
int epic_mii_read __P((struct device *, int, int));
void epic_mii_write __P((struct device *, int, int, int));
int epic_mii_wait __P((struct epic_softc *, u_int32_t));
void epic_tick __P((void *));

/* Media (MII/ifmedia) callbacks. */
void epic_statchg __P((struct device *));
int epic_mediachange __P((struct ifnet *));
void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));

/* Interrupt sources we enable and acknowledge in epic_intr(). */
#define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)

/* When non-zero, receive packets <= MHLEN are copied into a small mbuf. */
int epic_copy_small = 0;
117
118 /*
119 * Attach an EPIC interface to the system.
120 */
121 void
122 epic_attach(sc)
123 struct epic_softc *sc;
124 {
125 bus_space_tag_t st = sc->sc_st;
126 bus_space_handle_t sh = sc->sc_sh;
127 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
128 int i, rseg, error, miiflags;
129 bus_dma_segment_t seg;
130 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
131 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
132
133 callout_init(&sc->sc_mii_callout);
134
135 /*
136 * Allocate the control data structures, and create and load the
137 * DMA map for it.
138 */
139 if ((error = bus_dmamem_alloc(sc->sc_dmat,
140 sizeof(struct epic_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
141 BUS_DMA_NOWAIT)) != 0) {
142 printf("%s: unable to allocate control data, error = %d\n",
143 sc->sc_dev.dv_xname, error);
144 goto fail_0;
145 }
146
147 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
148 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
149 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
150 printf("%s: unable to map control data, error = %d\n",
151 sc->sc_dev.dv_xname, error);
152 goto fail_1;
153 }
154
155 if ((error = bus_dmamap_create(sc->sc_dmat,
156 sizeof(struct epic_control_data), 1,
157 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
158 &sc->sc_cddmamap)) != 0) {
159 printf("%s: unable to create control data DMA map, "
160 "error = %d\n", sc->sc_dev.dv_xname, error);
161 goto fail_2;
162 }
163
164 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
165 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
166 BUS_DMA_NOWAIT)) != 0) {
167 printf("%s: unable to load control data DMA map, error = %d\n",
168 sc->sc_dev.dv_xname, error);
169 goto fail_3;
170 }
171
172 /*
173 * Create the transmit buffer DMA maps.
174 */
175 for (i = 0; i < EPIC_NTXDESC; i++) {
176 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
177 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
178 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
179 printf("%s: unable to create tx DMA map %d, "
180 "error = %d\n", sc->sc_dev.dv_xname, i, error);
181 goto fail_4;
182 }
183 }
184
185 /*
186 * Create the receive buffer DMA maps.
187 */
188 for (i = 0; i < EPIC_NRXDESC; i++) {
189 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
190 MCLBYTES, 0, BUS_DMA_NOWAIT,
191 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
192 printf("%s: unable to create rx DMA map %d, "
193 "error = %d\n", sc->sc_dev.dv_xname, i, error);
194 goto fail_5;
195 }
196 EPIC_DSRX(sc, i)->ds_mbuf = NULL;
197 }
198
199
200 /*
201 * Bring the chip out of low-power mode and reset it to a known state.
202 */
203 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
204 epic_reset(sc);
205
206 /*
207 * Read the Ethernet address from the EEPROM.
208 */
209 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
210 for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
211 enaddr[i * 2] = myea[i] & 0xff;
212 enaddr[i * 2 + 1] = myea[i] >> 8;
213 }
214
215 /*
216 * ...and the device name.
217 */
218 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
219 mydevname);
220 for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
221 devname[i * 2] = mydevname[i] & 0xff;
222 devname[i * 2 + 1] = mydevname[i] >> 8;
223 }
224
225 devname[sizeof(mydevname)] = '\0';
226 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
227 if (devname[i] == ' ')
228 devname[i] = '\0';
229 else
230 break;
231 }
232
233 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
234 devname, ether_sprintf(enaddr));
235
236 miiflags = 0;
237 if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
238 miiflags |= MIIF_HAVEFIBER;
239
240 /*
241 * Initialize our media structures and probe the MII.
242 */
243 sc->sc_mii.mii_ifp = ifp;
244 sc->sc_mii.mii_readreg = epic_mii_read;
245 sc->sc_mii.mii_writereg = epic_mii_write;
246 sc->sc_mii.mii_statchg = epic_statchg;
247 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
248 epic_mediastatus);
249 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
250 MII_OFFSET_ANY, miiflags);
251 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
252 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
253 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
254 } else
255 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
256
257 if (sc->sc_hwflags & EPIC_HAS_BNC) {
258 /* use the next free media instance */
259 sc->sc_serinst = sc->sc_mii.mii_instance++;
260 ifmedia_add(&sc->sc_mii.mii_media,
261 IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
262 sc->sc_serinst),
263 0, NULL);
264 printf("%s: 10base2/BNC\n", sc->sc_dev.dv_xname);
265 } else
266 sc->sc_serinst = -1;
267
268 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
269 ifp->if_softc = sc;
270 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
271 ifp->if_ioctl = epic_ioctl;
272 ifp->if_start = epic_start;
273 ifp->if_watchdog = epic_watchdog;
274 ifp->if_init = epic_init;
275 ifp->if_stop = epic_stop;
276 IFQ_SET_READY(&ifp->if_snd);
277
278 /*
279 * We can support 802.1Q VLAN-sized frames.
280 */
281 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
282
283 /*
284 * Attach the interface.
285 */
286 if_attach(ifp);
287 ether_ifattach(ifp, enaddr);
288
289 /*
290 * Make sure the interface is shutdown during reboot.
291 */
292 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
293 if (sc->sc_sdhook == NULL)
294 printf("%s: WARNING: unable to establish shutdown hook\n",
295 sc->sc_dev.dv_xname);
296 return;
297
298 /*
299 * Free any resources we've allocated during the failed attach
300 * attempt. Do this in reverse order and fall through.
301 */
302 fail_5:
303 for (i = 0; i < EPIC_NRXDESC; i++) {
304 if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
305 bus_dmamap_destroy(sc->sc_dmat,
306 EPIC_DSRX(sc, i)->ds_dmamap);
307 }
308 fail_4:
309 for (i = 0; i < EPIC_NTXDESC; i++) {
310 if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
311 bus_dmamap_destroy(sc->sc_dmat,
312 EPIC_DSTX(sc, i)->ds_dmamap);
313 }
314 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
315 fail_3:
316 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
317 fail_2:
318 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
319 sizeof(struct epic_control_data));
320 fail_1:
321 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
322 fail_0:
323 return;
324 }
325
326 /*
327 * Shutdown hook. Make sure the interface is stopped at reboot.
328 */
329 void
330 epic_shutdown(arg)
331 void *arg;
332 {
333 struct epic_softc *sc = arg;
334
335 epic_stop(&sc->sc_ethercom.ec_if, 1);
336 }
337
338 /*
339 * Start packet transmission on the interface.
340 * [ifnet interface function]
341 */
342 void
343 epic_start(ifp)
344 struct ifnet *ifp;
345 {
346 struct epic_softc *sc = ifp->if_softc;
347 struct mbuf *m0, *m;
348 struct epic_txdesc *txd;
349 struct epic_descsoft *ds;
350 struct epic_fraglist *fr;
351 bus_dmamap_t dmamap;
352 int error, firsttx, nexttx, opending, seg;
353
354 /*
355 * Remember the previous txpending and the first transmit
356 * descriptor we use.
357 */
358 opending = sc->sc_txpending;
359 firsttx = EPIC_NEXTTX(sc->sc_txlast);
360
361 /*
362 * Loop through the send queue, setting up transmit descriptors
363 * until we drain the queue, or use up all available transmit
364 * descriptors.
365 */
366 while (sc->sc_txpending < EPIC_NTXDESC) {
367 /*
368 * Grab a packet off the queue.
369 */
370 IFQ_POLL(&ifp->if_snd, m0);
371 if (m0 == NULL)
372 break;
373 m = NULL;
374
375 /*
376 * Get the last and next available transmit descriptor.
377 */
378 nexttx = EPIC_NEXTTX(sc->sc_txlast);
379 txd = EPIC_CDTX(sc, nexttx);
380 fr = EPIC_CDFL(sc, nexttx);
381 ds = EPIC_DSTX(sc, nexttx);
382 dmamap = ds->ds_dmamap;
383
384 /*
385 * Load the DMA map. If this fails, the packet either
386 * didn't fit in the alloted number of frags, or we were
387 * short on resources. In this case, we'll copy and try
388 * again.
389 */
390 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
391 BUS_DMA_NOWAIT) != 0) {
392 MGETHDR(m, M_DONTWAIT, MT_DATA);
393 if (m == NULL) {
394 printf("%s: unable to allocate Tx mbuf\n",
395 sc->sc_dev.dv_xname);
396 break;
397 }
398 if (m0->m_pkthdr.len > MHLEN) {
399 MCLGET(m, M_DONTWAIT);
400 if ((m->m_flags & M_EXT) == 0) {
401 printf("%s: unable to allocate Tx "
402 "cluster\n", sc->sc_dev.dv_xname);
403 m_freem(m);
404 break;
405 }
406 }
407 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
408 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
409 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
410 m, BUS_DMA_NOWAIT);
411 if (error) {
412 printf("%s: unable to load Tx buffer, "
413 "error = %d\n", sc->sc_dev.dv_xname, error);
414 break;
415 }
416 }
417 IFQ_DEQUEUE(&ifp->if_snd, m0);
418 if (m != NULL) {
419 m_freem(m0);
420 m0 = m;
421 }
422
423 /* Initialize the fraglist. */
424 fr->ef_nfrags = dmamap->dm_nsegs;
425 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
426 fr->ef_frags[seg].ef_addr =
427 dmamap->dm_segs[seg].ds_addr;
428 fr->ef_frags[seg].ef_length =
429 dmamap->dm_segs[seg].ds_len;
430 }
431
432 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
433
434 /* Sync the DMA map. */
435 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
436 BUS_DMASYNC_PREWRITE);
437
438 /*
439 * Store a pointer to the packet so we can free it later.
440 */
441 ds->ds_mbuf = m0;
442
443 /*
444 * Fill in the transmit descriptor. The EPIC doesn't
445 * auto-pad, so we have to do this ourselves.
446 */
447 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
448 txd->et_txlength = max(m0->m_pkthdr.len,
449 ETHER_MIN_LEN - ETHER_CRC_LEN);
450
451 /*
452 * If this is the first descriptor we're enqueueing,
453 * don't give it to the EPIC yet. That could cause
454 * a race condition. We'll do it below.
455 */
456 if (nexttx == firsttx)
457 txd->et_txstatus = 0;
458 else
459 txd->et_txstatus = ET_TXSTAT_OWNER;
460
461 EPIC_CDTXSYNC(sc, nexttx,
462 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
463
464 /* Advance the tx pointer. */
465 sc->sc_txpending++;
466 sc->sc_txlast = nexttx;
467
468 #if NBPFILTER > 0
469 /*
470 * Pass the packet to any BPF listeners.
471 */
472 if (ifp->if_bpf)
473 bpf_mtap(ifp->if_bpf, m0);
474 #endif
475 }
476
477 if (sc->sc_txpending == EPIC_NTXDESC) {
478 /* No more slots left; notify upper layer. */
479 ifp->if_flags |= IFF_OACTIVE;
480 }
481
482 if (sc->sc_txpending != opending) {
483 /*
484 * We enqueued packets. If the transmitter was idle,
485 * reset the txdirty pointer.
486 */
487 if (opending == 0)
488 sc->sc_txdirty = firsttx;
489
490 /*
491 * Cause a transmit interrupt to happen on the
492 * last packet we enqueued.
493 */
494 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
495 EPIC_CDTXSYNC(sc, sc->sc_txlast,
496 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
497
498 /*
499 * The entire packet chain is set up. Give the
500 * first descriptor to the EPIC now.
501 */
502 EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
503 EPIC_CDTXSYNC(sc, firsttx,
504 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
505
506 /* Start the transmitter. */
507 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
508 COMMAND_TXQUEUED);
509
510 /* Set a watchdog timer in case the chip flakes out. */
511 ifp->if_timer = 5;
512 }
513 }
514
515 /*
516 * Watchdog timer handler.
517 * [ifnet interface function]
518 */
519 void
520 epic_watchdog(ifp)
521 struct ifnet *ifp;
522 {
523 struct epic_softc *sc = ifp->if_softc;
524
525 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
526 ifp->if_oerrors++;
527
528 (void) epic_init(ifp);
529 }
530
531 /*
532 * Handle control requests from the operator.
533 * [ifnet interface function]
534 */
535 int
536 epic_ioctl(ifp, cmd, data)
537 struct ifnet *ifp;
538 u_long cmd;
539 caddr_t data;
540 {
541 struct epic_softc *sc = ifp->if_softc;
542 struct ifreq *ifr = (struct ifreq *)data;
543 int s, error;
544
545 s = splnet();
546
547 switch (cmd) {
548 case SIOCSIFMEDIA:
549 case SIOCGIFMEDIA:
550 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
551 break;
552
553 default:
554 error = ether_ioctl(ifp, cmd, data);
555 if (error == ENETRESET) {
556 /*
557 * Multicast list has changed; set the hardware filter
558 * accordingly. Update our idea of the current media;
559 * epic_set_mchash() needs to know what it is.
560 */
561 mii_pollstat(&sc->sc_mii);
562 epic_set_mchash(sc);
563 error = 0;
564 }
565 break;
566 }
567
568 splx(s);
569 return (error);
570 }
571
572 /*
573 * Interrupt handler.
574 */
/*
 * Interrupt handler.
 *
 * Loops, reading INTSTAT and servicing receive-complete, transmit-
 * complete, and fatal-error conditions, until no interrupt remains
 * active.  Returns non-zero if any interrupt was claimed.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear, masked to the
	 * sources we enabled).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from the last software read pointer. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet.
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			/* Frame still carries its FCS; tell the stack. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* Still owned by the chip: stop reaping. */
			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		/* Reinitialize the chip to recover. */
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
828
829 /*
830 * One second timer, used to tick the MII.
831 */
832 void
833 epic_tick(arg)
834 void *arg;
835 {
836 struct epic_softc *sc = arg;
837 int s;
838
839 s = splnet();
840 mii_tick(&sc->sc_mii);
841 splx(s);
842
843 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
844 }
845
846 /*
847 * Fixup the clock source on the EPIC.
848 */
849 void
850 epic_fixup_clock_source(sc)
851 struct epic_softc *sc;
852 {
853 int i;
854
855 /*
856 * According to SMC Application Note 7-15, the EPIC's clock
857 * source is incorrect following a reset. This manifests itself
858 * as failure to recognize when host software has written to
859 * a register on the EPIC. The appnote recommends issuing at
860 * least 16 consecutive writes to the CLOCK TEST bit to correctly
861 * configure the clock source.
862 */
863 for (i = 0; i < 16; i++)
864 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
865 TEST_CLOCKTEST);
866 }
867
868 /*
869 * Perform a soft reset on the EPIC.
870 */
871 void
872 epic_reset(sc)
873 struct epic_softc *sc;
874 {
875
876 epic_fixup_clock_source(sc);
877
878 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
879 delay(100);
880 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
881 delay(100);
882
883 epic_fixup_clock_source(sc);
884 }
885
886 /*
887 * Initialize the interface. Must be called at splnet().
888 */
/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Stops any pending I/O, resets the chip, programs the station
 * address and receive filter, builds the transmit and receive
 * descriptor rings, enables interrupts, and starts the receiver
 * and the one-second MII tick.  Returns 0 on success or an errno
 * if receive buffers could not be allocated.
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (two bytes per LAN register,
	 * low byte first).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		/* Each descriptor points at its fraglist and its successor. */
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1046
1047 /*
1048 * Drain the receive queue.
1049 */
1050 void
1051 epic_rxdrain(sc)
1052 struct epic_softc *sc;
1053 {
1054 struct epic_descsoft *ds;
1055 int i;
1056
1057 for (i = 0; i < EPIC_NRXDESC; i++) {
1058 ds = EPIC_DSRX(sc, i);
1059 if (ds->ds_mbuf != NULL) {
1060 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1061 m_freem(ds->ds_mbuf);
1062 ds->ds_mbuf = NULL;
1063 }
1064 }
1065 }
1066
1067 /*
1068 * Stop transmission on the interface.
1069 */
/*
 * Stop transmission on the interface.
 *
 * Halts the MII tick, disables interrupts, stops the DMA engines and
 * the receiver, and frees all queued transmit mbufs.  If `disable' is
 * non-zero the receive buffers are drained as well (full shutdown);
 * otherwise they are kept for a subsequent epic_init().
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1127
1128 /*
1129 * Read the EPIC Serial EEPROM.
1130 */
1131 void
1132 epic_read_eeprom(sc, word, wordcnt, data)
1133 struct epic_softc *sc;
1134 int word, wordcnt;
1135 u_int16_t *data;
1136 {
1137 bus_space_tag_t st = sc->sc_st;
1138 bus_space_handle_t sh = sc->sc_sh;
1139 u_int16_t reg;
1140 int i, x;
1141
1142 #define EEPROM_WAIT_READY(st, sh) \
1143 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1144 /* nothing */
1145
1146 /*
1147 * Enable the EEPROM.
1148 */
1149 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1150 EEPROM_WAIT_READY(st, sh);
1151
1152 for (i = 0; i < wordcnt; i++) {
1153 /* Send CHIP SELECT for one clock tick. */
1154 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1155 EEPROM_WAIT_READY(st, sh);
1156
1157 /* Shift in the READ opcode. */
1158 for (x = 3; x > 0; x--) {
1159 reg = EECTL_ENABLE|EECTL_EECS;
1160 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1161 reg |= EECTL_EEDI;
1162 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1163 EEPROM_WAIT_READY(st, sh);
1164 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1165 EEPROM_WAIT_READY(st, sh);
1166 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1167 EEPROM_WAIT_READY(st, sh);
1168 }
1169
1170 /* Shift in address. */
1171 for (x = 6; x > 0; x--) {
1172 reg = EECTL_ENABLE|EECTL_EECS;
1173 if ((word + i) & (1 << (x - 1)))
1174 reg |= EECTL_EEDI;
1175 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1176 EEPROM_WAIT_READY(st, sh);
1177 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1178 EEPROM_WAIT_READY(st, sh);
1179 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1180 EEPROM_WAIT_READY(st, sh);
1181 }
1182
1183 /* Shift out data. */
1184 reg = EECTL_ENABLE|EECTL_EECS;
1185 data[i] = 0;
1186 for (x = 16; x > 0; x--) {
1187 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1188 EEPROM_WAIT_READY(st, sh);
1189 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1190 data[i] |= (1 << (x - 1));
1191 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1192 EEPROM_WAIT_READY(st, sh);
1193 }
1194
1195 /* Clear CHIP SELECT. */
1196 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1197 EEPROM_WAIT_READY(st, sh);
1198 }
1199
1200 /*
1201 * Disable the EEPROM.
1202 */
1203 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1204
1205 #undef EEPROM_WAIT_READY
1206 }
1207
1208 /*
1209 * Add a receive buffer to the indicated descriptor.
1210 */
1211 int
1212 epic_add_rxbuf(sc, idx)
1213 struct epic_softc *sc;
1214 int idx;
1215 {
1216 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1217 struct mbuf *m;
1218 int error;
1219
1220 MGETHDR(m, M_DONTWAIT, MT_DATA);
1221 if (m == NULL)
1222 return (ENOBUFS);
1223
1224 MCLGET(m, M_DONTWAIT);
1225 if ((m->m_flags & M_EXT) == 0) {
1226 m_freem(m);
1227 return (ENOBUFS);
1228 }
1229
1230 if (ds->ds_mbuf != NULL)
1231 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1232
1233 ds->ds_mbuf = m;
1234
1235 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1236 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1237 if (error) {
1238 printf("%s: can't load rx DMA map %d, error = %d\n",
1239 sc->sc_dev.dv_xname, idx, error);
1240 panic("epic_add_rxbuf"); /* XXX */
1241 }
1242
1243 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1244 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1245
1246 EPIC_INIT_RXDESC(sc, idx);
1247
1248 return (0);
1249 }
1250
1251 /*
1252 * Set the EPIC multicast hash table.
1253 *
1254 * NOTE: We rely on a recently-updated mii_media_active here!
1255 */
1256 void
1257 epic_set_mchash(sc)
1258 struct epic_softc *sc;
1259 {
1260 struct ethercom *ec = &sc->sc_ethercom;
1261 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1262 struct ether_multi *enm;
1263 struct ether_multistep step;
1264 u_int32_t hash, mchash[4];
1265
1266 /*
1267 * Set up the multicast address filter by passing all multicast
1268 * addresses through a CRC generator, and then using the low-order
1269 * 6 bits as an index into the 64 bit multicast hash table (only
1270 * the lower 16 bits of each 32 bit multicast hash register are
1271 * valid). The high order bits select the register, while the
1272 * rest of the bits select the bit within the register.
1273 */
1274
1275 if (ifp->if_flags & IFF_PROMISC)
1276 goto allmulti;
1277
1278 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1279 /* XXX hardware bug in 10Mbps mode. */
1280 goto allmulti;
1281 }
1282
1283 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1284
1285 ETHER_FIRST_MULTI(step, ec, enm);
1286 while (enm != NULL) {
1287 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1288 /*
1289 * We must listen to a range of multicast addresses.
1290 * For now, just accept all multicasts, rather than
1291 * trying to set only those filter bits needed to match
1292 * the range. (At this time, the only use of address
1293 * ranges is for IP multicast routing, for which the
1294 * range is big enough to require all bits set.)
1295 */
1296 goto allmulti;
1297 }
1298
1299 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1300 hash >>= 26;
1301
1302 /* Set the corresponding bit in the hash table. */
1303 mchash[hash >> 4] |= 1 << (hash & 0xf);
1304
1305 ETHER_NEXT_MULTI(step, enm);
1306 }
1307
1308 ifp->if_flags &= ~IFF_ALLMULTI;
1309 goto sethash;
1310
1311 allmulti:
1312 ifp->if_flags |= IFF_ALLMULTI;
1313 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1314
1315 sethash:
1316 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1317 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1318 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1319 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1320 }
1321
1322 /*
1323 * Wait for the MII to become ready.
1324 */
1325 int
1326 epic_mii_wait(sc, rw)
1327 struct epic_softc *sc;
1328 u_int32_t rw;
1329 {
1330 int i;
1331
1332 for (i = 0; i < 50; i++) {
1333 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1334 == 0)
1335 break;
1336 delay(2);
1337 }
1338 if (i == 50) {
1339 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1340 return (1);
1341 }
1342
1343 return (0);
1344 }
1345
1346 /*
1347 * Read from the MII.
1348 */
1349 int
1350 epic_mii_read(self, phy, reg)
1351 struct device *self;
1352 int phy, reg;
1353 {
1354 struct epic_softc *sc = (struct epic_softc *)self;
1355
1356 if (epic_mii_wait(sc, MMCTL_WRITE))
1357 return (0);
1358
1359 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1360 MMCTL_ARG(phy, reg, MMCTL_READ));
1361
1362 if (epic_mii_wait(sc, MMCTL_READ))
1363 return (0);
1364
1365 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1366 MMDATA_MASK);
1367 }
1368
1369 /*
1370 * Write to the MII.
1371 */
1372 void
1373 epic_mii_write(self, phy, reg, val)
1374 struct device *self;
1375 int phy, reg, val;
1376 {
1377 struct epic_softc *sc = (struct epic_softc *)self;
1378
1379 if (epic_mii_wait(sc, MMCTL_WRITE))
1380 return;
1381
1382 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1383 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1384 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1385 }
1386
1387 /*
1388 * Callback from PHY when media changes.
1389 */
1390 void
1391 epic_statchg(self)
1392 struct device *self;
1393 {
1394 struct epic_softc *sc = (struct epic_softc *)self;
1395 u_int32_t txcon, miicfg;
1396
1397 /*
1398 * Update loopback bits in TXCON to reflect duplex mode.
1399 */
1400 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1401 if (sc->sc_mii.mii_media_active & IFM_FDX)
1402 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1403 else
1404 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1405 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1406
1407 /* On some cards we need manualy set fullduplex led */
1408 if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1409 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1410 if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1411 miicfg |= MIICFG_ENABLE;
1412 else
1413 miicfg &= ~MIICFG_ENABLE;
1414 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1415 }
1416
1417 /*
1418 * There is a multicast filter bug in 10Mbps mode. Kick the
1419 * multicast filter in case the speed changed.
1420 */
1421 epic_set_mchash(sc);
1422 }
1423
1424 /*
1425 * Callback from ifmedia to request current media status.
1426 */
1427 void
1428 epic_mediastatus(ifp, ifmr)
1429 struct ifnet *ifp;
1430 struct ifmediareq *ifmr;
1431 {
1432 struct epic_softc *sc = ifp->if_softc;
1433
1434 mii_pollstat(&sc->sc_mii);
1435 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1436 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1437 }
1438
1439 /*
1440 * Callback from ifmedia to request new media setting.
1441 */
1442 int
1443 epic_mediachange(ifp)
1444 struct ifnet *ifp;
1445 {
1446 struct epic_softc *sc = ifp->if_softc;
1447 struct mii_data *mii = &sc->sc_mii;
1448 struct ifmedia *ifm = &mii->mii_media;
1449 int media = ifm->ifm_cur->ifm_media;
1450 u_int32_t miicfg;
1451 struct mii_softc *miisc;
1452 int cfg;
1453
1454 if (!(ifp->if_flags & IFF_UP))
1455 return (0);
1456
1457 if (IFM_INST(media) != sc->sc_serinst) {
1458 /* If we're not selecting serial interface, select MII mode */
1459 #ifdef EPICMEDIADEBUG
1460 printf("%s: parallel mode\n", ifp->if_xname);
1461 #endif
1462 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1463 miicfg &= ~MIICFG_SERMODEENA;
1464 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1465 }
1466
1467 mii_mediachg(mii);
1468
1469 if (IFM_INST(media) == sc->sc_serinst) {
1470 /* select serial interface */
1471 #ifdef EPICMEDIADEBUG
1472 printf("%s: serial mode\n", ifp->if_xname);
1473 #endif
1474 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1475 miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
1476 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1477
1478 /* There is no driver to fill this */
1479 mii->mii_media_active = media;
1480 mii->mii_media_status = 0;
1481
1482 epic_statchg(&sc->sc_dev);
1483 return (0);
1484 }
1485
1486 /* Lookup selected PHY */
1487 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1488 miisc = LIST_NEXT(miisc, mii_list)) {
1489 if (IFM_INST(media) == miisc->mii_inst)
1490 break;
1491 }
1492 if (!miisc) {
1493 printf("epic_mediachange: can't happen\n"); /* ??? panic */
1494 return (0);
1495 }
1496 #ifdef EPICMEDIADEBUG
1497 printf("%s: using phy %s\n", ifp->if_xname,
1498 miisc->mii_dev.dv_xname);
1499 #endif
1500
1501 if (miisc->mii_flags & MIIF_HAVEFIBER) {
1502 /* XXX XXX assume it's a Level1 - should check */
1503
1504 /* We have to powerup fiber tranceivers */
1505 cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
1506 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1507 #ifdef EPICMEDIADEBUG
1508 printf("%s: power up fiber\n", ifp->if_xname);
1509 #endif
1510 cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
1511 } else {
1512 #ifdef EPICMEDIADEBUG
1513 printf("%s: power down fiber\n", ifp->if_xname);
1514 #endif
1515 cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
1516 }
1517 PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
1518 }
1519
1520 return (0);
1521 }
1522