smc83c170.c revision 1.37 1 /* $NetBSD: smc83c170.c,v 1.37 2000/11/08 15:20:29 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #ifdef INET
70 #include <netinet/in.h>
71 #include <netinet/if_inarp.h>
72 #endif
73
74 #ifdef NS
75 #include <netns/ns.h>
76 #include <netns/ns_if.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/mii/miivar.h>
83
84 #include <dev/ic/smc83c170reg.h>
85 #include <dev/ic/smc83c170var.h>
86
87 void epic_start __P((struct ifnet *));
88 void epic_watchdog __P((struct ifnet *));
89 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
90 int epic_init __P((struct ifnet *));
91 void epic_stop __P((struct ifnet *, int));
92
93 void epic_shutdown __P((void *));
94
95 void epic_reset __P((struct epic_softc *));
96 void epic_rxdrain __P((struct epic_softc *));
97 int epic_add_rxbuf __P((struct epic_softc *, int));
98 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
99 void epic_set_mchash __P((struct epic_softc *));
100 void epic_fixup_clock_source __P((struct epic_softc *));
101 int epic_mii_read __P((struct device *, int, int));
102 void epic_mii_write __P((struct device *, int, int, int));
103 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
104 void epic_tick __P((void *));
105
106 void epic_statchg __P((struct device *));
107 int epic_mediachange __P((struct ifnet *));
108 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
109
110 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
111 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
112
113 int epic_copy_small = 0;
114
115 /*
116 * Attach an EPIC interface to the system.
117 */
/*
 * Attach an EPIC interface to the system.
 *
 * Allocates and maps the DMA control data (descriptor rings and
 * fraglists), creates the per-packet Tx/Rx DMA maps, reads the
 * station address and device name out of the serial EEPROM, probes
 * the MII, and attaches the network interface.  On failure, resources
 * are released in reverse order via the fail_* ladder below.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  The EEPROM is
	 * read in 16-bit words; the address is stored little-endian
	 * within each word, so unpack low byte first.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (a space-padded ASCII string starting
	 * at EEPROM word 0x2c).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate, then strip the trailing space padding. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no
	 * PHY is found, fall back to a manual "none" media setting.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Fill in the ifnet entry points and flags. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
310
311 /*
312 * Shutdown hook. Make sure the interface is stopped at reboot.
313 */
314 void
315 epic_shutdown(arg)
316 void *arg;
317 {
318 struct epic_softc *sc = arg;
319
320 epic_stop(&sc->sc_ethercom.ec_if, 1);
321 }
322
323 /*
324 * Start packet transmission on the interface.
325 * [ifnet interface function]
326 */
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Dequeues packets from the send queue and fills in transmit
 * descriptors/fraglists until the queue drains or the descriptor
 * ring fills.  Ownership of the first descriptor is withheld from
 * the chip until the whole batch is set up, to avoid a race with
 * the transmit DMA engine.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy into a
		 * freshly-allocated contiguous mbuf (cluster) and try
		 * again.  On any allocation failure, the packet is put
		 * back on the send queue for a later retry.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the loaded DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves by telling
		 * the chip to transmit at least a minimum-length frame.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
498
499 /*
500 * Watchdog timer handler.
501 * [ifnet interface function]
502 */
503 void
504 epic_watchdog(ifp)
505 struct ifnet *ifp;
506 {
507 struct epic_softc *sc = ifp->if_softc;
508
509 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
510 ifp->if_oerrors++;
511
512 (void) epic_init(ifp);
513 }
514
515 /*
516 * Handle control requests from the operator.
517 * [ifnet interface function]
518 */
519 int
520 epic_ioctl(ifp, cmd, data)
521 struct ifnet *ifp;
522 u_long cmd;
523 caddr_t data;
524 {
525 struct epic_softc *sc = ifp->if_softc;
526 struct ifreq *ifr = (struct ifreq *)data;
527 int s, error;
528
529 s = splnet();
530
531 switch (cmd) {
532 case SIOCSIFMEDIA:
533 case SIOCGIFMEDIA:
534 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
535 break;
536
537 default:
538 error = ether_ioctl(ifp, cmd, data);
539 if (error == ENETRESET) {
540 /*
541 * Multicast list has changed; set the hardware filter
542 * accordingly. Update our idea of the current media;
543 * epic_set_mchash() needs to know what it is.
544 */
545 mii_pollstat(&sc->sc_mii);
546 epic_set_mchash(sc);
547 error = 0;
548 }
549 break;
550 }
551
552 splx(s);
553 return (error);
554 }
555
556 /*
557 * Interrupt handler.
558 */
/*
 * Interrupt handler.
 *
 * Loops reading INTSTAT until no interrupt is pending: processes the
 * receive ring, reaps completed transmit descriptors, restarts the
 * chip after RX queue-empty or TX underrun, and reinitializes on
 * fatal PCI errors.  Returns non-zero if the interrupt was ours.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we
	 * handle).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from the last-known position. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * Chip still owns this descriptor: we have
				 * processed all of the receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet.
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			/* The frame still carries its FCS; mark it so. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap descriptors the chip has given back to us. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
812
813 /*
814 * One second timer, used to tick the MII.
815 */
816 void
817 epic_tick(arg)
818 void *arg;
819 {
820 struct epic_softc *sc = arg;
821 int s;
822
823 s = splnet();
824 mii_tick(&sc->sc_mii);
825 splx(s);
826
827 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
828 }
829
830 /*
831 * Fixup the clock source on the EPIC.
832 */
833 void
834 epic_fixup_clock_source(sc)
835 struct epic_softc *sc;
836 {
837 int i;
838
839 /*
840 * According to SMC Application Note 7-15, the EPIC's clock
841 * source is incorrect following a reset. This manifests itself
842 * as failure to recognize when host software has written to
843 * a register on the EPIC. The appnote recommends issuing at
844 * least 16 consecutive writes to the CLOCK TEST bit to correctly
845 * configure the clock source.
846 */
847 for (i = 0; i < 16; i++)
848 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
849 TEST_CLOCKTEST);
850 }
851
852 /*
853 * Perform a soft reset on the EPIC.
854 */
855 void
856 epic_reset(sc)
857 struct epic_softc *sc;
858 {
859
860 epic_fixup_clock_source(sc);
861
862 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
863 delay(100);
864 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
865 delay(100);
866
867 epic_fixup_clock_source(sc);
868 }
869
870 /*
871 * Initialize the interface. Must be called at splnet().
872 */
/*
 * Initialize the interface.  Must be called at splnet().
 *
 * Resets the chip, programs the station address and receive filter,
 * sets the current media, builds the transmit and receive descriptor
 * rings, enables interrupts, and starts the receiver.  Returns 0 on
 * success or an errno if receive buffers could not be allocated.
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  GPIO1 is raised around the PHY
	 * reset pulse; the original NVCTL value is restored afterwards.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  The LAN registers take the
	 * address two bytes at a time, low byte in the low half.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any slots that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1030
1031 /*
1032 * Drain the receive queue.
1033 */
1034 void
1035 epic_rxdrain(sc)
1036 struct epic_softc *sc;
1037 {
1038 struct epic_descsoft *ds;
1039 int i;
1040
1041 for (i = 0; i < EPIC_NRXDESC; i++) {
1042 ds = EPIC_DSRX(sc, i);
1043 if (ds->ds_mbuf != NULL) {
1044 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1045 m_freem(ds->ds_mbuf);
1046 ds->ds_mbuf = NULL;
1047 }
1048 }
1049 }
1050
1051 /*
1052 * Stop transmission on the interface.
1053 */
/*
 * Stop transmission on the interface.
 *
 * Stops the MII tick callout, masks interrupts, halts the DMA
 * engines and receiver, and frees any queued transmit mbufs.  If
 * 'disable' is set, the receive buffers are drained as well (full
 * shutdown, e.g. from the shutdown hook).
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia...  make sure register writes will be seen. */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1111
1112 /*
1113 * Read the EPIC Serial EEPROM.
1114 */
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs a READ command over the microwire-style serial interface
 * in the EECTL register: for each of 'wordcnt' words starting at
 * 'word', shift out the 3-bit READ opcode and a 6-bit address, then
 * clock in 16 data bits MSB-first.  Results are stored in 'data'.
 * Each control-line transition is followed by a wait for EERDY.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/* Spin until the EEPROM interface reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode (3 bits, MSB first). */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, then pulse the clock. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address (6 bits, MSB first). */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data (16 bits, MSB first). */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1191
1192 /*
1193 * Add a receive buffer to the indicated descriptor.
1194 */
1195 int
1196 epic_add_rxbuf(sc, idx)
1197 struct epic_softc *sc;
1198 int idx;
1199 {
1200 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1201 struct mbuf *m;
1202 int error;
1203
1204 MGETHDR(m, M_DONTWAIT, MT_DATA);
1205 if (m == NULL)
1206 return (ENOBUFS);
1207
1208 MCLGET(m, M_DONTWAIT);
1209 if ((m->m_flags & M_EXT) == 0) {
1210 m_freem(m);
1211 return (ENOBUFS);
1212 }
1213
1214 if (ds->ds_mbuf != NULL)
1215 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1216
1217 ds->ds_mbuf = m;
1218
1219 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1220 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1221 if (error) {
1222 printf("%s: can't load rx DMA map %d, error = %d\n",
1223 sc->sc_dev.dv_xname, idx, error);
1224 panic("epic_add_rxbuf"); /* XXX */
1225 }
1226
1227 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1228 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1229
1230 EPIC_INIT_RXDESC(sc, idx);
1231
1232 return (0);
1233 }
1234
1235 /*
1236 * Set the EPIC multicast hash table.
1237 *
1238 * NOTE: We rely on a recently-updated mii_media_active here!
1239 */
1240 void
1241 epic_set_mchash(sc)
1242 struct epic_softc *sc;
1243 {
1244 struct ethercom *ec = &sc->sc_ethercom;
1245 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1246 struct ether_multi *enm;
1247 struct ether_multistep step;
1248 u_int32_t hash, mchash[4];
1249
1250 /*
1251 * Set up the multicast address filter by passing all multicast
1252 * addresses through a CRC generator, and then using the low-order
1253 * 6 bits as an index into the 64 bit multicast hash table (only
1254 * the lower 16 bits of each 32 bit multicast hash register are
1255 * valid). The high order bits select the register, while the
1256 * rest of the bits select the bit within the register.
1257 */
1258
1259 if (ifp->if_flags & IFF_PROMISC)
1260 goto allmulti;
1261
1262 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1263 /* XXX hardware bug in 10Mbps mode. */
1264 goto allmulti;
1265 }
1266
1267 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1268
1269 ETHER_FIRST_MULTI(step, ec, enm);
1270 while (enm != NULL) {
1271 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1272 /*
1273 * We must listen to a range of multicast addresses.
1274 * For now, just accept all multicasts, rather than
1275 * trying to set only those filter bits needed to match
1276 * the range. (At this time, the only use of address
1277 * ranges is for IP multicast routing, for which the
1278 * range is big enough to require all bits set.)
1279 */
1280 goto allmulti;
1281 }
1282
1283 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1284 hash >>= 26;
1285
1286 /* Set the corresponding bit in the hash table. */
1287 mchash[hash >> 4] |= 1 << (hash & 0xf);
1288
1289 ETHER_NEXT_MULTI(step, enm);
1290 }
1291
1292 ifp->if_flags &= ~IFF_ALLMULTI;
1293 goto sethash;
1294
1295 allmulti:
1296 ifp->if_flags |= IFF_ALLMULTI;
1297 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1298
1299 sethash:
1300 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1301 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1302 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1303 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1304 }
1305
1306 /*
1307 * Wait for the MII to become ready.
1308 */
1309 int
1310 epic_mii_wait(sc, rw)
1311 struct epic_softc *sc;
1312 u_int32_t rw;
1313 {
1314 int i;
1315
1316 for (i = 0; i < 50; i++) {
1317 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1318 == 0)
1319 break;
1320 delay(2);
1321 }
1322 if (i == 50) {
1323 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1324 return (1);
1325 }
1326
1327 return (0);
1328 }
1329
1330 /*
1331 * Read from the MII.
1332 */
1333 int
1334 epic_mii_read(self, phy, reg)
1335 struct device *self;
1336 int phy, reg;
1337 {
1338 struct epic_softc *sc = (struct epic_softc *)self;
1339
1340 if (epic_mii_wait(sc, MMCTL_WRITE))
1341 return (0);
1342
1343 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1344 MMCTL_ARG(phy, reg, MMCTL_READ));
1345
1346 if (epic_mii_wait(sc, MMCTL_READ))
1347 return (0);
1348
1349 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1350 MMDATA_MASK);
1351 }
1352
1353 /*
1354 * Write to the MII.
1355 */
1356 void
1357 epic_mii_write(self, phy, reg, val)
1358 struct device *self;
1359 int phy, reg, val;
1360 {
1361 struct epic_softc *sc = (struct epic_softc *)self;
1362
1363 if (epic_mii_wait(sc, MMCTL_WRITE))
1364 return;
1365
1366 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1367 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1368 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1369 }
1370
1371 /*
1372 * Callback from PHY when media changes.
1373 */
1374 void
1375 epic_statchg(self)
1376 struct device *self;
1377 {
1378 struct epic_softc *sc = (struct epic_softc *)self;
1379 u_int32_t txcon;
1380
1381 /*
1382 * Update loopback bits in TXCON to reflect duplex mode.
1383 */
1384 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1385 if (sc->sc_mii.mii_media_active & IFM_FDX)
1386 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1387 else
1388 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1389 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1390
1391 /*
1392 * There is a multicast filter bug in 10Mbps mode. Kick the
1393 * multicast filter in case the speed changed.
1394 */
1395 epic_set_mchash(sc);
1396 }
1397
1398 /*
1399 * Callback from ifmedia to request current media status.
1400 */
1401 void
1402 epic_mediastatus(ifp, ifmr)
1403 struct ifnet *ifp;
1404 struct ifmediareq *ifmr;
1405 {
1406 struct epic_softc *sc = ifp->if_softc;
1407
1408 mii_pollstat(&sc->sc_mii);
1409 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1410 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1411 }
1412
1413 /*
1414 * Callback from ifmedia to request new media setting.
1415 */
1416 int
1417 epic_mediachange(ifp)
1418 struct ifnet *ifp;
1419 {
1420 struct epic_softc *sc = ifp->if_softc;
1421
1422 if (ifp->if_flags & IFF_UP)
1423 mii_mediachg(&sc->sc_mii);
1424 return (0);
1425 }
1426