/*	$NetBSD: smc83c170.c,v 1.39 2000/11/15 01:02:17 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <uvm/uvm_extern.h>
61
62 #include <net/if.h>
63 #include <net/if_dl.h>
64 #include <net/if_media.h>
65 #include <net/if_ether.h>
66
67 #if NBPFILTER > 0
68 #include <net/bpf.h>
69 #endif
70
71 #ifdef INET
72 #include <netinet/in.h>
73 #include <netinet/if_inarp.h>
74 #endif
75
76 #ifdef NS
77 #include <netns/ns.h>
78 #include <netns/ns_if.h>
79 #endif
80
81 #include <machine/bus.h>
82 #include <machine/intr.h>
83
84 #include <dev/mii/miivar.h>
85
86 #include <dev/ic/smc83c170reg.h>
87 #include <dev/ic/smc83c170var.h>
88
89 void epic_start __P((struct ifnet *));
90 void epic_watchdog __P((struct ifnet *));
91 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
92 int epic_init __P((struct ifnet *));
93 void epic_stop __P((struct ifnet *, int));
94
95 void epic_shutdown __P((void *));
96
97 void epic_reset __P((struct epic_softc *));
98 void epic_rxdrain __P((struct epic_softc *));
99 int epic_add_rxbuf __P((struct epic_softc *, int));
100 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
101 void epic_set_mchash __P((struct epic_softc *));
102 void epic_fixup_clock_source __P((struct epic_softc *));
103 int epic_mii_read __P((struct device *, int, int));
104 void epic_mii_write __P((struct device *, int, int, int));
105 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
106 void epic_tick __P((void *));
107
108 void epic_statchg __P((struct device *));
109 int epic_mediachange __P((struct ifnet *));
110 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
111
112 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
113 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
114
115 int epic_copy_small = 0;
116
117 /*
118 * Attach an EPIC interface to the system.
119 */
120 void
121 epic_attach(sc)
122 struct epic_softc *sc;
123 {
124 bus_space_tag_t st = sc->sc_st;
125 bus_space_handle_t sh = sc->sc_sh;
126 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
127 int i, rseg, error;
128 bus_dma_segment_t seg;
129 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
130 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
131
132 callout_init(&sc->sc_mii_callout);
133
134 /*
135 * Allocate the control data structures, and create and load the
136 * DMA map for it.
137 */
138 if ((error = bus_dmamem_alloc(sc->sc_dmat,
139 sizeof(struct epic_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
140 BUS_DMA_NOWAIT)) != 0) {
141 printf("%s: unable to allocate control data, error = %d\n",
142 sc->sc_dev.dv_xname, error);
143 goto fail_0;
144 }
145
146 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
147 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
148 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
149 printf("%s: unable to map control data, error = %d\n",
150 sc->sc_dev.dv_xname, error);
151 goto fail_1;
152 }
153
154 if ((error = bus_dmamap_create(sc->sc_dmat,
155 sizeof(struct epic_control_data), 1,
156 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
157 &sc->sc_cddmamap)) != 0) {
158 printf("%s: unable to create control data DMA map, "
159 "error = %d\n", sc->sc_dev.dv_xname, error);
160 goto fail_2;
161 }
162
163 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
164 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
165 BUS_DMA_NOWAIT)) != 0) {
166 printf("%s: unable to load control data DMA map, error = %d\n",
167 sc->sc_dev.dv_xname, error);
168 goto fail_3;
169 }
170
171 /*
172 * Create the transmit buffer DMA maps.
173 */
174 for (i = 0; i < EPIC_NTXDESC; i++) {
175 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
176 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
177 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
178 printf("%s: unable to create tx DMA map %d, "
179 "error = %d\n", sc->sc_dev.dv_xname, i, error);
180 goto fail_4;
181 }
182 }
183
184 /*
185 * Create the recieve buffer DMA maps.
186 */
187 for (i = 0; i < EPIC_NRXDESC; i++) {
188 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
189 MCLBYTES, 0, BUS_DMA_NOWAIT,
190 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
191 printf("%s: unable to create rx DMA map %d, "
192 "error = %d\n", sc->sc_dev.dv_xname, i, error);
193 goto fail_5;
194 }
195 EPIC_DSRX(sc, i)->ds_mbuf = NULL;
196 }
197
198
199 /*
200 * Bring the chip out of low-power mode and reset it to a known state.
201 */
202 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
203 epic_reset(sc);
204
205 /*
206 * Read the Ethernet address from the EEPROM.
207 */
208 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
209 for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
210 enaddr[i * 2] = myea[i] & 0xff;
211 enaddr[i * 2 + 1] = myea[i] >> 8;
212 }
213
214 /*
215 * ...and the device name.
216 */
217 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
218 mydevname);
219 for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
220 devname[i * 2] = mydevname[i] & 0xff;
221 devname[i * 2 + 1] = mydevname[i] >> 8;
222 }
223
224 devname[sizeof(mydevname)] = '\0';
225 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
226 if (devname[i] == ' ')
227 devname[i] = '\0';
228 else
229 break;
230 }
231
232 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
233 devname, ether_sprintf(enaddr));
234
235 /*
236 * Initialize our media structures and probe the MII.
237 */
238 sc->sc_mii.mii_ifp = ifp;
239 sc->sc_mii.mii_readreg = epic_mii_read;
240 sc->sc_mii.mii_writereg = epic_mii_write;
241 sc->sc_mii.mii_statchg = epic_statchg;
242 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
243 epic_mediastatus);
244 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
245 MII_OFFSET_ANY, 0);
246 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
247 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
248 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
249 } else
250 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
251
252 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
253 ifp->if_softc = sc;
254 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
255 ifp->if_ioctl = epic_ioctl;
256 ifp->if_start = epic_start;
257 ifp->if_watchdog = epic_watchdog;
258 ifp->if_init = epic_init;
259 ifp->if_stop = epic_stop;
260
261 /*
262 * We can support 802.1Q VLAN-sized frames.
263 */
264 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
265
266 /*
267 * Attach the interface.
268 */
269 if_attach(ifp);
270 ether_ifattach(ifp, enaddr);
271
272 /*
273 * Make sure the interface is shutdown during reboot.
274 */
275 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
276 if (sc->sc_sdhook == NULL)
277 printf("%s: WARNING: unable to establish shutdown hook\n",
278 sc->sc_dev.dv_xname);
279 return;
280
281 /*
282 * Free any resources we've allocated during the failed attach
283 * attempt. Do this in reverse order and fall through.
284 */
285 fail_5:
286 for (i = 0; i < EPIC_NRXDESC; i++) {
287 if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
288 bus_dmamap_destroy(sc->sc_dmat,
289 EPIC_DSRX(sc, i)->ds_dmamap);
290 }
291 fail_4:
292 for (i = 0; i < EPIC_NTXDESC; i++) {
293 if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
294 bus_dmamap_destroy(sc->sc_dmat,
295 EPIC_DSTX(sc, i)->ds_dmamap);
296 }
297 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
298 fail_3:
299 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
300 fail_2:
301 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
302 sizeof(struct epic_control_data));
303 fail_1:
304 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
305 fail_0:
306 return;
307 }
308
309 /*
310 * Shutdown hook. Make sure the interface is stopped at reboot.
311 */
312 void
313 epic_shutdown(arg)
314 void *arg;
315 {
316 struct epic_softc *sc = arg;
317
318 epic_stop(&sc->sc_ethercom.ec_if, 1);
319 }
320
321 /*
322 * Start packet transmission on the interface.
323 * [ifnet interface function]
324 */
325 void
326 epic_start(ifp)
327 struct ifnet *ifp;
328 {
329 struct epic_softc *sc = ifp->if_softc;
330 struct mbuf *m0, *m;
331 struct epic_txdesc *txd;
332 struct epic_descsoft *ds;
333 struct epic_fraglist *fr;
334 bus_dmamap_t dmamap;
335 int error, firsttx, nexttx, opending, seg;
336
337 /*
338 * Remember the previous txpending and the first transmit
339 * descriptor we use.
340 */
341 opending = sc->sc_txpending;
342 firsttx = EPIC_NEXTTX(sc->sc_txlast);
343
344 /*
345 * Loop through the send queue, setting up transmit descriptors
346 * until we drain the queue, or use up all available transmit
347 * descriptors.
348 */
349 while (sc->sc_txpending < EPIC_NTXDESC) {
350 /*
351 * Grab a packet off the queue.
352 */
353 IF_DEQUEUE(&ifp->if_snd, m0);
354 if (m0 == NULL)
355 break;
356
357 /*
358 * Get the last and next available transmit descriptor.
359 */
360 nexttx = EPIC_NEXTTX(sc->sc_txlast);
361 txd = EPIC_CDTX(sc, nexttx);
362 fr = EPIC_CDFL(sc, nexttx);
363 ds = EPIC_DSTX(sc, nexttx);
364 dmamap = ds->ds_dmamap;
365
366 /*
367 * Load the DMA map. If this fails, the packet either
368 * didn't fit in the alloted number of frags, or we were
369 * short on resources. In this case, we'll copy and try
370 * again.
371 */
372 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
373 BUS_DMA_NOWAIT) != 0) {
374 MGETHDR(m, M_DONTWAIT, MT_DATA);
375 if (m == NULL) {
376 printf("%s: unable to allocate Tx mbuf\n",
377 sc->sc_dev.dv_xname);
378 IF_PREPEND(&ifp->if_snd, m0);
379 break;
380 }
381 if (m0->m_pkthdr.len > MHLEN) {
382 MCLGET(m, M_DONTWAIT);
383 if ((m->m_flags & M_EXT) == 0) {
384 printf("%s: unable to allocate Tx "
385 "cluster\n", sc->sc_dev.dv_xname);
386 m_freem(m);
387 IF_PREPEND(&ifp->if_snd, m0);
388 break;
389 }
390 }
391 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
392 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
393 m_freem(m0);
394 m0 = m;
395 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
396 m0, BUS_DMA_NOWAIT);
397 if (error) {
398 printf("%s: unable to load Tx buffer, "
399 "error = %d\n", sc->sc_dev.dv_xname, error);
400 IF_PREPEND(&ifp->if_snd, m0);
401 break;
402 }
403 }
404
405 /* Initialize the fraglist. */
406 fr->ef_nfrags = dmamap->dm_nsegs;
407 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
408 fr->ef_frags[seg].ef_addr =
409 dmamap->dm_segs[seg].ds_addr;
410 fr->ef_frags[seg].ef_length =
411 dmamap->dm_segs[seg].ds_len;
412 }
413
414 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
415
416 /* Sync the DMA map. */
417 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
418 BUS_DMASYNC_PREWRITE);
419
420 /*
421 * Store a pointer to the packet so we can free it later.
422 */
423 ds->ds_mbuf = m0;
424
425 /*
426 * Fill in the transmit descriptor. The EPIC doesn't
427 * auto-pad, so we have to do this ourselves.
428 */
429 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
430 txd->et_txlength = max(m0->m_pkthdr.len,
431 ETHER_MIN_LEN - ETHER_CRC_LEN);
432
433 /*
434 * If this is the first descriptor we're enqueueing,
435 * don't give it to the EPIC yet. That could cause
436 * a race condition. We'll do it below.
437 */
438 if (nexttx == firsttx)
439 txd->et_txstatus = 0;
440 else
441 txd->et_txstatus = ET_TXSTAT_OWNER;
442
443 EPIC_CDTXSYNC(sc, nexttx,
444 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
445
446 /* Advance the tx pointer. */
447 sc->sc_txpending++;
448 sc->sc_txlast = nexttx;
449
450 #if NBPFILTER > 0
451 /*
452 * Pass the packet to any BPF listeners.
453 */
454 if (ifp->if_bpf)
455 bpf_mtap(ifp->if_bpf, m0);
456 #endif
457 }
458
459 if (sc->sc_txpending == EPIC_NTXDESC) {
460 /* No more slots left; notify upper layer. */
461 ifp->if_flags |= IFF_OACTIVE;
462 }
463
464 if (sc->sc_txpending != opending) {
465 /*
466 * We enqueued packets. If the transmitter was idle,
467 * reset the txdirty pointer.
468 */
469 if (opending == 0)
470 sc->sc_txdirty = firsttx;
471
472 /*
473 * Cause a transmit interrupt to happen on the
474 * last packet we enqueued.
475 */
476 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
477 EPIC_CDTXSYNC(sc, sc->sc_txlast,
478 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
479
480 /*
481 * The entire packet chain is set up. Give the
482 * first descriptor to the EPIC now.
483 */
484 EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
485 EPIC_CDTXSYNC(sc, firsttx,
486 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
487
488 /* Start the transmitter. */
489 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
490 COMMAND_TXQUEUED);
491
492 /* Set a watchdog timer in case the chip flakes out. */
493 ifp->if_timer = 5;
494 }
495 }
496
497 /*
498 * Watchdog timer handler.
499 * [ifnet interface function]
500 */
501 void
502 epic_watchdog(ifp)
503 struct ifnet *ifp;
504 {
505 struct epic_softc *sc = ifp->if_softc;
506
507 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
508 ifp->if_oerrors++;
509
510 (void) epic_init(ifp);
511 }
512
513 /*
514 * Handle control requests from the operator.
515 * [ifnet interface function]
516 */
517 int
518 epic_ioctl(ifp, cmd, data)
519 struct ifnet *ifp;
520 u_long cmd;
521 caddr_t data;
522 {
523 struct epic_softc *sc = ifp->if_softc;
524 struct ifreq *ifr = (struct ifreq *)data;
525 int s, error;
526
527 s = splnet();
528
529 switch (cmd) {
530 case SIOCSIFMEDIA:
531 case SIOCGIFMEDIA:
532 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
533 break;
534
535 default:
536 error = ether_ioctl(ifp, cmd, data);
537 if (error == ENETRESET) {
538 /*
539 * Multicast list has changed; set the hardware filter
540 * accordingly. Update our idea of the current media;
541 * epic_set_mchash() needs to know what it is.
542 */
543 mii_pollstat(&sc->sc_mii);
544 epic_set_mchash(sc);
545 error = 0;
546 }
547 break;
548 }
549
550 splx(s);
551 return (error);
552 }
553
554 /*
555 * Interrupt handler.
556 */
557 int
558 epic_intr(arg)
559 void *arg;
560 {
561 struct epic_softc *sc = arg;
562 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
563 struct epic_rxdesc *rxd;
564 struct epic_txdesc *txd;
565 struct epic_descsoft *ds;
566 struct mbuf *m;
567 u_int32_t intstat;
568 int i, len, claimed = 0;
569
570 top:
571 /*
572 * Get the interrupt status from the EPIC.
573 */
574 intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
575 if ((intstat & INTSTAT_INT_ACTV) == 0)
576 return (claimed);
577
578 claimed = 1;
579
580 /*
581 * Acknowledge the interrupt.
582 */
583 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
584 intstat & INTMASK);
585
586 /*
587 * Check for receive interrupts.
588 */
589 if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
590 for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
591 rxd = EPIC_CDRX(sc, i);
592 ds = EPIC_DSRX(sc, i);
593
594 EPIC_CDRXSYNC(sc, i,
595 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
596
597 if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
598 /*
599 * We have processed all of the
600 * receive buffers.
601 */
602 break;
603 }
604
605 /*
606 * Make sure the packet arrived intact. If an error
607 * occurred, update stats and reset the descriptor.
608 * The buffer will be reused the next time the
609 * descriptor comes up in the ring.
610 */
611 if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
612 if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
613 printf("%s: CRC error\n",
614 sc->sc_dev.dv_xname);
615 if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
616 printf("%s: alignment error\n",
617 sc->sc_dev.dv_xname);
618 ifp->if_ierrors++;
619 EPIC_INIT_RXDESC(sc, i);
620 continue;
621 }
622
623 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
624 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
625
626 /*
627 * The EPIC includes the CRC with every packet.
628 */
629 len = rxd->er_rxlength;
630
631 if (len < sizeof(struct ether_header)) {
632 /*
633 * Runt packet; drop it now.
634 */
635 ifp->if_ierrors++;
636 EPIC_INIT_RXDESC(sc, i);
637 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
638 ds->ds_dmamap->dm_mapsize,
639 BUS_DMASYNC_PREREAD);
640 continue;
641 }
642
643 /*
644 * If the packet is small enough to fit in a
645 * single header mbuf, allocate one and copy
646 * the data into it. This greatly reduces
647 * memory consumption when we receive lots
648 * of small packets.
649 *
650 * Otherwise, we add a new buffer to the receive
651 * chain. If this fails, we drop the packet and
652 * recycle the old buffer.
653 */
654 if (epic_copy_small != 0 && len <= MHLEN) {
655 MGETHDR(m, M_DONTWAIT, MT_DATA);
656 if (m == NULL)
657 goto dropit;
658 memcpy(mtod(m, caddr_t),
659 mtod(ds->ds_mbuf, caddr_t), len);
660 EPIC_INIT_RXDESC(sc, i);
661 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
662 ds->ds_dmamap->dm_mapsize,
663 BUS_DMASYNC_PREREAD);
664 } else {
665 m = ds->ds_mbuf;
666 if (epic_add_rxbuf(sc, i) != 0) {
667 dropit:
668 ifp->if_ierrors++;
669 EPIC_INIT_RXDESC(sc, i);
670 bus_dmamap_sync(sc->sc_dmat,
671 ds->ds_dmamap, 0,
672 ds->ds_dmamap->dm_mapsize,
673 BUS_DMASYNC_PREREAD);
674 continue;
675 }
676 }
677
678 m->m_flags |= M_HASFCS;
679 m->m_pkthdr.rcvif = ifp;
680 m->m_pkthdr.len = m->m_len = len;
681
682 #if NBPFILTER > 0
683 /*
684 * Pass this up to any BPF listeners, but only
685 * pass it up the stack if its for us.
686 */
687 if (ifp->if_bpf)
688 bpf_mtap(ifp->if_bpf, m);
689 #endif
690
691 /* Pass it on. */
692 (*ifp->if_input)(ifp, m);
693 ifp->if_ipackets++;
694 }
695
696 /* Update the recieve pointer. */
697 sc->sc_rxptr = i;
698
699 /*
700 * Check for receive queue underflow.
701 */
702 if (intstat & INTSTAT_RQE) {
703 printf("%s: receiver queue empty\n",
704 sc->sc_dev.dv_xname);
705 /*
706 * Ring is already built; just restart the
707 * receiver.
708 */
709 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
710 EPIC_CDRXADDR(sc, sc->sc_rxptr));
711 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
712 COMMAND_RXQUEUED | COMMAND_START_RX);
713 }
714 }
715
716 /*
717 * Check for transmission complete interrupts.
718 */
719 if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
720 ifp->if_flags &= ~IFF_OACTIVE;
721 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
722 i = EPIC_NEXTTX(i), sc->sc_txpending--) {
723 txd = EPIC_CDTX(sc, i);
724 ds = EPIC_DSTX(sc, i);
725
726 EPIC_CDTXSYNC(sc, i,
727 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
728
729 if (txd->et_txstatus & ET_TXSTAT_OWNER)
730 break;
731
732 EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
733
734 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
735 0, ds->ds_dmamap->dm_mapsize,
736 BUS_DMASYNC_POSTWRITE);
737 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
738 m_freem(ds->ds_mbuf);
739 ds->ds_mbuf = NULL;
740
741 /*
742 * Check for errors and collisions.
743 */
744 if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
745 ifp->if_oerrors++;
746 else
747 ifp->if_opackets++;
748 ifp->if_collisions +=
749 TXSTAT_COLLISIONS(txd->et_txstatus);
750 if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
751 printf("%s: lost carrier\n",
752 sc->sc_dev.dv_xname);
753 }
754
755 /* Update the dirty transmit buffer pointer. */
756 sc->sc_txdirty = i;
757
758 /*
759 * Cancel the watchdog timer if there are no pending
760 * transmissions.
761 */
762 if (sc->sc_txpending == 0)
763 ifp->if_timer = 0;
764
765 /*
766 * Kick the transmitter after a DMA underrun.
767 */
768 if (intstat & INTSTAT_TXU) {
769 printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
770 bus_space_write_4(sc->sc_st, sc->sc_sh,
771 EPIC_COMMAND, COMMAND_TXUGO);
772 if (sc->sc_txpending)
773 bus_space_write_4(sc->sc_st, sc->sc_sh,
774 EPIC_COMMAND, COMMAND_TXQUEUED);
775 }
776
777 /*
778 * Try to get more packets going.
779 */
780 epic_start(ifp);
781 }
782
783 /*
784 * Check for fatal interrupts.
785 */
786 if (intstat & INTSTAT_FATAL_INT) {
787 if (intstat & INTSTAT_PTA)
788 printf("%s: PCI target abort error\n",
789 sc->sc_dev.dv_xname);
790 else if (intstat & INTSTAT_PMA)
791 printf("%s: PCI master abort error\n",
792 sc->sc_dev.dv_xname);
793 else if (intstat & INTSTAT_APE)
794 printf("%s: PCI address parity error\n",
795 sc->sc_dev.dv_xname);
796 else if (intstat & INTSTAT_DPE)
797 printf("%s: PCI data parity error\n",
798 sc->sc_dev.dv_xname);
799 else
800 printf("%s: unknown fatal error\n",
801 sc->sc_dev.dv_xname);
802 (void) epic_init(ifp);
803 }
804
805 /*
806 * Check for more interrupts.
807 */
808 goto top;
809 }
810
811 /*
812 * One second timer, used to tick the MII.
813 */
814 void
815 epic_tick(arg)
816 void *arg;
817 {
818 struct epic_softc *sc = arg;
819 int s;
820
821 s = splnet();
822 mii_tick(&sc->sc_mii);
823 splx(s);
824
825 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
826 }
827
828 /*
829 * Fixup the clock source on the EPIC.
830 */
831 void
832 epic_fixup_clock_source(sc)
833 struct epic_softc *sc;
834 {
835 int i;
836
837 /*
838 * According to SMC Application Note 7-15, the EPIC's clock
839 * source is incorrect following a reset. This manifests itself
840 * as failure to recognize when host software has written to
841 * a register on the EPIC. The appnote recommends issuing at
842 * least 16 consecutive writes to the CLOCK TEST bit to correctly
843 * configure the clock source.
844 */
845 for (i = 0; i < 16; i++)
846 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
847 TEST_CLOCKTEST);
848 }
849
850 /*
851 * Perform a soft reset on the EPIC.
852 */
853 void
854 epic_reset(sc)
855 struct epic_softc *sc;
856 {
857
858 epic_fixup_clock_source(sc);
859
860 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
861 delay(100);
862 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
863 delay(100);
864
865 epic_fixup_clock_source(sc);
866 }
867
868 /*
869 * Initialize the interface. Must be called at splnet().
870 */
871 int
872 epic_init(ifp)
873 struct ifnet *ifp;
874 {
875 struct epic_softc *sc = ifp->if_softc;
876 bus_space_tag_t st = sc->sc_st;
877 bus_space_handle_t sh = sc->sc_sh;
878 u_int8_t *enaddr = LLADDR(ifp->if_sadl);
879 struct epic_txdesc *txd;
880 struct epic_descsoft *ds;
881 u_int32_t genctl, reg0;
882 int i, error = 0;
883
884 /*
885 * Cancel any pending I/O.
886 */
887 epic_stop(ifp, 0);
888
889 /*
890 * Reset the EPIC to a known state.
891 */
892 epic_reset(sc);
893
894 /*
895 * Magical mystery initialization.
896 */
897 bus_space_write_4(st, sh, EPIC_TXTEST, 0);
898
899 /*
900 * Initialize the EPIC genctl register:
901 *
902 * - 64 byte receive FIFO threshold
903 * - automatic advance to next receive frame
904 */
905 genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
906 #if BYTE_ORDER == BIG_ENDIAN
907 genctl |= GENCTL_BIG_ENDIAN;
908 #endif
909 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
910
911 /*
912 * Reset the MII bus and PHY.
913 */
914 reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
915 bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
916 bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
917 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
918 delay(100);
919 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
920 delay(100);
921 bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
922
923 /*
924 * Initialize Ethernet address.
925 */
926 reg0 = enaddr[1] << 8 | enaddr[0];
927 bus_space_write_4(st, sh, EPIC_LAN0, reg0);
928 reg0 = enaddr[3] << 8 | enaddr[2];
929 bus_space_write_4(st, sh, EPIC_LAN1, reg0);
930 reg0 = enaddr[5] << 8 | enaddr[4];
931 bus_space_write_4(st, sh, EPIC_LAN2, reg0);
932
933 /*
934 * Initialize receive control. Remember the external buffer
935 * size setting.
936 */
937 reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
938 (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
939 reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
940 if (ifp->if_flags & IFF_PROMISC)
941 reg0 |= RXCON_PROMISCMODE;
942 bus_space_write_4(st, sh, EPIC_RXCON, reg0);
943
944 /* Set the current media. */
945 mii_mediachg(&sc->sc_mii);
946
947 /* Set up the multicast hash table. */
948 epic_set_mchash(sc);
949
950 /*
951 * Initialize the transmit descriptor ring. txlast is initialized
952 * to the end of the list so that it will wrap around to the first
953 * descriptor when the first packet is transmitted.
954 */
955 for (i = 0; i < EPIC_NTXDESC; i++) {
956 txd = EPIC_CDTX(sc, i);
957 memset(txd, 0, sizeof(struct epic_txdesc));
958 txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
959 txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
960 EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
961 }
962 sc->sc_txpending = 0;
963 sc->sc_txdirty = 0;
964 sc->sc_txlast = EPIC_NTXDESC - 1;
965
966 /*
967 * Initialize the receive descriptor ring.
968 */
969 for (i = 0; i < EPIC_NRXDESC; i++) {
970 ds = EPIC_DSRX(sc, i);
971 if (ds->ds_mbuf == NULL) {
972 if ((error = epic_add_rxbuf(sc, i)) != 0) {
973 printf("%s: unable to allocate or map rx "
974 "buffer %d error = %d\n",
975 sc->sc_dev.dv_xname, i, error);
976 /*
977 * XXX Should attempt to run with fewer receive
978 * XXX buffers instead of just failing.
979 */
980 epic_rxdrain(sc);
981 goto out;
982 }
983 }
984 }
985 sc->sc_rxptr = 0;
986
987 /*
988 * Initialize the interrupt mask and enable interrupts.
989 */
990 bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
991 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
992
993 /*
994 * Give the transmit and receive rings to the EPIC.
995 */
996 bus_space_write_4(st, sh, EPIC_PTCDAR,
997 EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
998 bus_space_write_4(st, sh, EPIC_PRCDAR,
999 EPIC_CDRXADDR(sc, sc->sc_rxptr));
1000
1001 /*
1002 * Set the EPIC in motion.
1003 */
1004 bus_space_write_4(st, sh, EPIC_COMMAND,
1005 COMMAND_RXQUEUED | COMMAND_START_RX);
1006
1007 /*
1008 * ...all done!
1009 */
1010 ifp->if_flags |= IFF_RUNNING;
1011 ifp->if_flags &= ~IFF_OACTIVE;
1012
1013 /*
1014 * Start the one second clock.
1015 */
1016 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
1017
1018 /*
1019 * Attempt to start output on the interface.
1020 */
1021 epic_start(ifp);
1022
1023 out:
1024 if (error)
1025 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1026 return (error);
1027 }
1028
1029 /*
1030 * Drain the receive queue.
1031 */
1032 void
1033 epic_rxdrain(sc)
1034 struct epic_softc *sc;
1035 {
1036 struct epic_descsoft *ds;
1037 int i;
1038
1039 for (i = 0; i < EPIC_NRXDESC; i++) {
1040 ds = EPIC_DSRX(sc, i);
1041 if (ds->ds_mbuf != NULL) {
1042 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1043 m_freem(ds->ds_mbuf);
1044 ds->ds_mbuf = NULL;
1045 }
1046 }
1047 }
1048
1049 /*
1050 * Stop transmission on the interface.
1051 */
1052 void
1053 epic_stop(ifp, disable)
1054 struct ifnet *ifp;
1055 int disable;
1056 {
1057 struct epic_softc *sc = ifp->if_softc;
1058 bus_space_tag_t st = sc->sc_st;
1059 bus_space_handle_t sh = sc->sc_sh;
1060 struct epic_descsoft *ds;
1061 u_int32_t reg;
1062 int i;
1063
1064 /*
1065 * Stop the one second clock.
1066 */
1067 callout_stop(&sc->sc_mii_callout);
1068
1069 /* Down the MII. */
1070 mii_down(&sc->sc_mii);
1071
1072 /* Paranoia... */
1073 epic_fixup_clock_source(sc);
1074
1075 /*
1076 * Disable interrupts.
1077 */
1078 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1079 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1080 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1081
1082 /*
1083 * Stop the DMA engine and take the receiver off-line.
1084 */
1085 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1086 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1087
1088 /*
1089 * Release any queued transmit buffers.
1090 */
1091 for (i = 0; i < EPIC_NTXDESC; i++) {
1092 ds = EPIC_DSTX(sc, i);
1093 if (ds->ds_mbuf != NULL) {
1094 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1095 m_freem(ds->ds_mbuf);
1096 ds->ds_mbuf = NULL;
1097 }
1098 }
1099
1100 if (disable)
1101 epic_rxdrain(sc);
1102
1103 /*
1104 * Mark the interface down and cancel the watchdog timer.
1105 */
1106 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1107 ifp->if_timer = 0;
1108 }
1109
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the chip's Microwire-style serial EEPROM interface through
 * the EECTL register: a READ opcode and a 6-bit word address are clocked
 * in MSB-first, then 16 data bits are clocked out MSB-first for each
 * word requested.  (The 6-bit address suggests a 93C46-class 64x16 part
 * -- TODO confirm against the board's actual EEPROM.)
 *
 * sc      - softc of the EPIC instance to read from
 * word    - first EEPROM word offset to read
 * wordcnt - number of consecutive 16-bit words to read
 * data    - caller-supplied array receiving `wordcnt' words
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

/* Spin until the EEPROM interface reports ready (EECTL_EERDY set). */
#define EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, then pulse the clock high/low. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out the 16 data bits, MSB first. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			/* Clock high; the part drives the next bit on EEDO. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1189
1190 /*
1191 * Add a receive buffer to the indicated descriptor.
1192 */
1193 int
1194 epic_add_rxbuf(sc, idx)
1195 struct epic_softc *sc;
1196 int idx;
1197 {
1198 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1199 struct mbuf *m;
1200 int error;
1201
1202 MGETHDR(m, M_DONTWAIT, MT_DATA);
1203 if (m == NULL)
1204 return (ENOBUFS);
1205
1206 MCLGET(m, M_DONTWAIT);
1207 if ((m->m_flags & M_EXT) == 0) {
1208 m_freem(m);
1209 return (ENOBUFS);
1210 }
1211
1212 if (ds->ds_mbuf != NULL)
1213 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1214
1215 ds->ds_mbuf = m;
1216
1217 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1218 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1219 if (error) {
1220 printf("%s: can't load rx DMA map %d, error = %d\n",
1221 sc->sc_dev.dv_xname, idx, error);
1222 panic("epic_add_rxbuf"); /* XXX */
1223 }
1224
1225 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1226 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1227
1228 EPIC_INIT_RXDESC(sc, idx);
1229
1230 return (0);
1231 }
1232
1233 /*
1234 * Set the EPIC multicast hash table.
1235 *
1236 * NOTE: We rely on a recently-updated mii_media_active here!
1237 */
1238 void
1239 epic_set_mchash(sc)
1240 struct epic_softc *sc;
1241 {
1242 struct ethercom *ec = &sc->sc_ethercom;
1243 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1244 struct ether_multi *enm;
1245 struct ether_multistep step;
1246 u_int32_t hash, mchash[4];
1247
1248 /*
1249 * Set up the multicast address filter by passing all multicast
1250 * addresses through a CRC generator, and then using the low-order
1251 * 6 bits as an index into the 64 bit multicast hash table (only
1252 * the lower 16 bits of each 32 bit multicast hash register are
1253 * valid). The high order bits select the register, while the
1254 * rest of the bits select the bit within the register.
1255 */
1256
1257 if (ifp->if_flags & IFF_PROMISC)
1258 goto allmulti;
1259
1260 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1261 /* XXX hardware bug in 10Mbps mode. */
1262 goto allmulti;
1263 }
1264
1265 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1266
1267 ETHER_FIRST_MULTI(step, ec, enm);
1268 while (enm != NULL) {
1269 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1270 /*
1271 * We must listen to a range of multicast addresses.
1272 * For now, just accept all multicasts, rather than
1273 * trying to set only those filter bits needed to match
1274 * the range. (At this time, the only use of address
1275 * ranges is for IP multicast routing, for which the
1276 * range is big enough to require all bits set.)
1277 */
1278 goto allmulti;
1279 }
1280
1281 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1282 hash >>= 26;
1283
1284 /* Set the corresponding bit in the hash table. */
1285 mchash[hash >> 4] |= 1 << (hash & 0xf);
1286
1287 ETHER_NEXT_MULTI(step, enm);
1288 }
1289
1290 ifp->if_flags &= ~IFF_ALLMULTI;
1291 goto sethash;
1292
1293 allmulti:
1294 ifp->if_flags |= IFF_ALLMULTI;
1295 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1296
1297 sethash:
1298 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1299 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1300 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1301 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1302 }
1303
1304 /*
1305 * Wait for the MII to become ready.
1306 */
1307 int
1308 epic_mii_wait(sc, rw)
1309 struct epic_softc *sc;
1310 u_int32_t rw;
1311 {
1312 int i;
1313
1314 for (i = 0; i < 50; i++) {
1315 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1316 == 0)
1317 break;
1318 delay(2);
1319 }
1320 if (i == 50) {
1321 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1322 return (1);
1323 }
1324
1325 return (0);
1326 }
1327
1328 /*
1329 * Read from the MII.
1330 */
1331 int
1332 epic_mii_read(self, phy, reg)
1333 struct device *self;
1334 int phy, reg;
1335 {
1336 struct epic_softc *sc = (struct epic_softc *)self;
1337
1338 if (epic_mii_wait(sc, MMCTL_WRITE))
1339 return (0);
1340
1341 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1342 MMCTL_ARG(phy, reg, MMCTL_READ));
1343
1344 if (epic_mii_wait(sc, MMCTL_READ))
1345 return (0);
1346
1347 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1348 MMDATA_MASK);
1349 }
1350
1351 /*
1352 * Write to the MII.
1353 */
1354 void
1355 epic_mii_write(self, phy, reg, val)
1356 struct device *self;
1357 int phy, reg, val;
1358 {
1359 struct epic_softc *sc = (struct epic_softc *)self;
1360
1361 if (epic_mii_wait(sc, MMCTL_WRITE))
1362 return;
1363
1364 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1365 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1366 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1367 }
1368
1369 /*
1370 * Callback from PHY when media changes.
1371 */
1372 void
1373 epic_statchg(self)
1374 struct device *self;
1375 {
1376 struct epic_softc *sc = (struct epic_softc *)self;
1377 u_int32_t txcon;
1378
1379 /*
1380 * Update loopback bits in TXCON to reflect duplex mode.
1381 */
1382 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1383 if (sc->sc_mii.mii_media_active & IFM_FDX)
1384 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1385 else
1386 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1387 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1388
1389 /*
1390 * There is a multicast filter bug in 10Mbps mode. Kick the
1391 * multicast filter in case the speed changed.
1392 */
1393 epic_set_mchash(sc);
1394 }
1395
1396 /*
1397 * Callback from ifmedia to request current media status.
1398 */
1399 void
1400 epic_mediastatus(ifp, ifmr)
1401 struct ifnet *ifp;
1402 struct ifmediareq *ifmr;
1403 {
1404 struct epic_softc *sc = ifp->if_softc;
1405
1406 mii_pollstat(&sc->sc_mii);
1407 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1408 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1409 }
1410
1411 /*
1412 * Callback from ifmedia to request new media setting.
1413 */
1414 int
1415 epic_mediachange(ifp)
1416 struct ifnet *ifp;
1417 {
1418 struct epic_softc *sc = ifp->if_softc;
1419
1420 if (ifp->if_flags & IFF_UP)
1421 mii_mediachg(&sc->sc_mii);
1422 return (0);
1423 }
1424