smc83c170.c revision 1.35 1 /* $NetBSD: smc83c170.c,v 1.35 2000/10/15 20:00:50 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #ifdef INET
70 #include <netinet/in.h>
71 #include <netinet/if_inarp.h>
72 #endif
73
74 #ifdef NS
75 #include <netns/ns.h>
76 #include <netns/ns_if.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/mii/miivar.h>
83
84 #include <dev/ic/smc83c170reg.h>
85 #include <dev/ic/smc83c170var.h>
86
/*
 * Forward declarations: ifnet interface entry points, the shutdown
 * hook, and internal helpers (reset, Rx buffer management, EEPROM,
 * MII access, and media handling).
 */
87 void epic_start __P((struct ifnet *));
88 void epic_watchdog __P((struct ifnet *));
89 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
90 int epic_init __P((struct ifnet *));
91 void epic_stop __P((struct ifnet *, int));
92
93 void epic_shutdown __P((void *));
94
95 void epic_reset __P((struct epic_softc *));
96 void epic_rxdrain __P((struct epic_softc *));
97 int epic_add_rxbuf __P((struct epic_softc *, int));
98 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
99 void epic_set_mchash __P((struct epic_softc *));
100 void epic_fixup_clock_source __P((struct epic_softc *));
101 int epic_mii_read __P((struct device *, int, int));
102 void epic_mii_write __P((struct device *, int, int, int));
103 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
104 void epic_tick __P((void *));
105
106 void epic_statchg __P((struct device *));
107 int epic_mediachange __P((struct ifnet *));
108 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
109
/*
 * Interrupt sources we enable and acknowledge: fatal PCI errors,
 * Tx underrun/complete, and Rx error/queue-empty/copy-complete.
 */
110 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
111 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
112
/*
 * Tunable: when non-zero, received packets that fit in a plain header
 * mbuf (<= MHLEN) are copied out of the cluster in epic_intr(), saving
 * cluster memory under small-packet loads.  Off by default.
 */
113 int epic_copy_small = 0;
114
115 /*
116 * Attach an EPIC interface to the system.
117 */
118 void
119 epic_attach(sc)
120 struct epic_softc *sc;
121 {
/*
 * Assumes the bus front-end has already filled in sc_st, sc_sh and
 * sc_dmat (they are read below before any initialization here).
 */
122 bus_space_tag_t st = sc->sc_st;
123 bus_space_handle_t sh = sc->sc_sh;
124 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
125 int i, rseg, error;
126 bus_dma_segment_t seg;
127 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
128 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
129
130 callout_init(&sc->sc_mii_callout);
131
132 /*
133 * Allocate the control data structures, and create and load the
134 * DMA map for it.
135 */
136 if ((error = bus_dmamem_alloc(sc->sc_dmat,
137 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
138 BUS_DMA_NOWAIT)) != 0) {
139 printf("%s: unable to allocate control data, error = %d\n",
140 sc->sc_dev.dv_xname, error);
141 goto fail_0;
142 }
143
144 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
145 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
146 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
147 printf("%s: unable to map control data, error = %d\n",
148 sc->sc_dev.dv_xname, error);
149 goto fail_1;
150 }
151
152 if ((error = bus_dmamap_create(sc->sc_dmat,
153 sizeof(struct epic_control_data), 1,
154 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
155 &sc->sc_cddmamap)) != 0) {
156 printf("%s: unable to create control data DMA map, "
157 "error = %d\n", sc->sc_dev.dv_xname, error);
158 goto fail_2;
159 }
160
161 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
162 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
163 BUS_DMA_NOWAIT)) != 0) {
164 printf("%s: unable to load control data DMA map, error = %d\n",
165 sc->sc_dev.dv_xname, error);
166 goto fail_3;
167 }
168
169 /*
170 * Create the transmit buffer DMA maps.
171 */
172 for (i = 0; i < EPIC_NTXDESC; i++) {
173 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
174 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
175 &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
176 printf("%s: unable to create tx DMA map %d, "
177 "error = %d\n", sc->sc_dev.dv_xname, i, error);
178 goto fail_4;
179 }
180 }
181
182 /*
183 * Create the receive buffer DMA maps.
184 */
185 for (i = 0; i < EPIC_NRXDESC; i++) {
186 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
187 MCLBYTES, 0, BUS_DMA_NOWAIT,
188 &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
189 printf("%s: unable to create rx DMA map %d, "
190 "error = %d\n", sc->sc_dev.dv_xname, i, error);
191 goto fail_5;
192 }
193 EPIC_DSRX(sc, i)->ds_mbuf = NULL;
194 }
195
196
197 /*
198 * Bring the chip out of low-power mode and reset it to a known state.
199 */
200 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
201 epic_reset(sc);
202
203 /*
204 * Read the Ethernet address from the EEPROM.
205 */
/* EEPROM words are 16-bit little-endian; unpack into enaddr bytes. */
206 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
207 for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
208 enaddr[i * 2] = myea[i] & 0xff;
209 enaddr[i * 2 + 1] = myea[i] >> 8;
210 }
211
212 /*
213 * ...and the device name.
214 */
215 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
216 mydevname);
217 for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
218 devname[i * 2] = mydevname[i] & 0xff;
219 devname[i * 2 + 1] = mydevname[i] >> 8;
220 }
221
/* NUL-terminate the 12-byte name and strip trailing blanks. */
222 devname[sizeof(mydevname)] = '\0';
223 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
224 if (devname[i] == ' ')
225 devname[i] = '\0';
226 else
227 break;
228 }
229
230 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
231 devname, ether_sprintf(enaddr));
232
233 /*
234 * Initialize our media structures and probe the MII.
235 */
236 sc->sc_mii.mii_ifp = ifp;
237 sc->sc_mii.mii_readreg = epic_mii_read;
238 sc->sc_mii.mii_writereg = epic_mii_write;
239 sc->sc_mii.mii_statchg = epic_statchg;
240 ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
241 epic_mediastatus);
242 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
243 MII_OFFSET_ANY, 0);
/* No PHY found: fall back to a manual "none" medium; else autoselect. */
244 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
245 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
246 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
247 } else
248 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
249
/* Fill in the ifnet entry points and attach the interface. */
250 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
251 ifp->if_softc = sc;
252 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
253 ifp->if_ioctl = epic_ioctl;
254 ifp->if_start = epic_start;
255 ifp->if_watchdog = epic_watchdog;
256 ifp->if_init = epic_init;
257 ifp->if_stop = epic_stop;
258
259 /*
260 * Attach the interface.
261 */
262 if_attach(ifp);
263 ether_ifattach(ifp, enaddr);
264 #if NBPFILTER > 0
265 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
266 sizeof(struct ether_header));
267 #endif
268
269 /*
270 * Make sure the interface is shutdown during reboot.
271 */
272 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
273 if (sc->sc_sdhook == NULL)
274 printf("%s: WARNING: unable to establish shutdown hook\n",
275 sc->sc_dev.dv_xname);
276 return;
277
278 /*
279 * Free any resources we've allocated during the failed attach
280 * attempt. Do this in reverse order and fall through.
281 */
282 fail_5:
283 for (i = 0; i < EPIC_NRXDESC; i++) {
284 if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
285 bus_dmamap_destroy(sc->sc_dmat,
286 EPIC_DSRX(sc, i)->ds_dmamap);
287 }
288 fail_4:
289 for (i = 0; i < EPIC_NTXDESC; i++) {
290 if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
291 bus_dmamap_destroy(sc->sc_dmat,
292 EPIC_DSTX(sc, i)->ds_dmamap);
293 }
294 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
295 fail_3:
296 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
297 fail_2:
298 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
299 sizeof(struct epic_control_data));
300 fail_1:
301 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
302 fail_0:
303 return;
304 }
305
306 /*
307 * Shutdown hook. Make sure the interface is stopped at reboot.
308 */
309 void
310 epic_shutdown(arg)
311 void *arg;
312 {
313 struct epic_softc *sc = arg;
314
/* Stop the chip; the non-zero second argument also drains Rx buffers. */
315 epic_stop(&sc->sc_ethercom.ec_if, 1);
316 }
317
318 /*
319 * Start packet transmission on the interface.
320 * [ifnet interface function]
321 */
322 void
323 epic_start(ifp)
324 struct ifnet *ifp;
325 {
326 struct epic_softc *sc = ifp->if_softc;
327 struct mbuf *m0, *m;
328 struct epic_txdesc *txd;
329 struct epic_descsoft *ds;
330 struct epic_fraglist *fr;
331 bus_dmamap_t dmamap;
332 int error, firsttx, nexttx, opending, seg;
333
334 /*
335 * Remember the previous txpending and the first transmit
336 * descriptor we use.
337 */
338 opending = sc->sc_txpending;
339 firsttx = EPIC_NEXTTX(sc->sc_txlast);
340
341 /*
342 * Loop through the send queue, setting up transmit descriptors
343 * until we drain the queue, or use up all available transmit
344 * descriptors.
345 */
346 while (sc->sc_txpending < EPIC_NTXDESC) {
347 /*
348 * Grab a packet off the queue.
349 */
350 IF_DEQUEUE(&ifp->if_snd, m0);
351 if (m0 == NULL)
352 break;
353
354 /*
355 * Get the last and next available transmit descriptor.
356 */
357 nexttx = EPIC_NEXTTX(sc->sc_txlast);
358 txd = EPIC_CDTX(sc, nexttx);
359 fr = EPIC_CDFL(sc, nexttx);
360 ds = EPIC_DSTX(sc, nexttx);
361 dmamap = ds->ds_dmamap;
362
363 /*
364 * Load the DMA map. If this fails, the packet either
365 * didn't fit in the alloted number of frags, or we were
366 * short on resources. In this case, we'll copy and try
367 * again.
368 */
369 if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
370 BUS_DMA_NOWAIT) != 0) {
371 MGETHDR(m, M_DONTWAIT, MT_DATA);
372 if (m == NULL) {
373 printf("%s: unable to allocate Tx mbuf\n",
374 sc->sc_dev.dv_xname);
/* Put the packet back at the head of the queue to retry later. */
375 IF_PREPEND(&ifp->if_snd, m0);
376 break;
377 }
/* Packet too big for a bare header mbuf; need a cluster too. */
378 if (m0->m_pkthdr.len > MHLEN) {
379 MCLGET(m, M_DONTWAIT);
380 if ((m->m_flags & M_EXT) == 0) {
381 printf("%s: unable to allocate Tx "
382 "cluster\n", sc->sc_dev.dv_xname);
383 m_freem(m);
384 IF_PREPEND(&ifp->if_snd, m0);
385 break;
386 }
387 }
/* Flatten the chain into the new single mbuf and retry the load. */
388 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
389 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
390 m_freem(m0);
391 m0 = m;
392 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
393 m0, BUS_DMA_NOWAIT);
394 if (error) {
395 printf("%s: unable to load Tx buffer, "
396 "error = %d\n", sc->sc_dev.dv_xname, error);
397 IF_PREPEND(&ifp->if_snd, m0);
398 break;
399 }
400 }
401
402 /* Initialize the fraglist. */
403 fr->ef_nfrags = dmamap->dm_nsegs;
404 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
405 fr->ef_frags[seg].ef_addr =
406 dmamap->dm_segs[seg].ds_addr;
407 fr->ef_frags[seg].ef_length =
408 dmamap->dm_segs[seg].ds_len;
409 }
410
/* Flush the fraglist to memory before the chip can look at it. */
411 EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);
412
413 /* Sync the DMA map. */
414 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
415 BUS_DMASYNC_PREWRITE);
416
417 /*
418 * Store a pointer to the packet so we can free it later.
419 */
420 ds->ds_mbuf = m0;
421
422 /*
423 * Fill in the transmit descriptor. The EPIC doesn't
424 * auto-pad, so we have to do this ourselves.
425 */
426 txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
427 txd->et_txlength = max(m0->m_pkthdr.len,
428 ETHER_MIN_LEN - ETHER_CRC_LEN);
429
430 /*
431 * If this is the first descriptor we're enqueueing,
432 * don't give it to the EPIC yet. That could cause
433 * a race condition. We'll do it below.
434 */
435 if (nexttx == firsttx)
436 txd->et_txstatus = 0;
437 else
438 txd->et_txstatus = ET_TXSTAT_OWNER;
439
440 EPIC_CDTXSYNC(sc, nexttx,
441 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
442
443 /* Advance the tx pointer. */
444 sc->sc_txpending++;
445 sc->sc_txlast = nexttx;
446
447 #if NBPFILTER > 0
448 /*
449 * Pass the packet to any BPF listeners.
450 */
451 if (ifp->if_bpf)
452 bpf_mtap(ifp->if_bpf, m0);
453 #endif
454 }
455
456 if (sc->sc_txpending == EPIC_NTXDESC) {
457 /* No more slots left; notify upper layer. */
458 ifp->if_flags |= IFF_OACTIVE;
459 }
460
461 if (sc->sc_txpending != opending) {
462 /*
463 * We enqueued packets. If the transmitter was idle,
464 * reset the txdirty pointer.
465 */
466 if (opending == 0)
467 sc->sc_txdirty = firsttx;
468
469 /*
470 * Cause a transmit interrupt to happen on the
471 * last packet we enqueued.
472 */
473 EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
474 EPIC_CDTXSYNC(sc, sc->sc_txlast,
475 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
476
477 /*
478 * The entire packet chain is set up. Give the
479 * first descriptor to the EPIC now.
480 */
481 EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
482 EPIC_CDTXSYNC(sc, firsttx,
483 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
484
485 /* Start the transmitter. */
486 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
487 COMMAND_TXQUEUED);
488
489 /* Set a watchdog timer in case the chip flakes out. */
490 ifp->if_timer = 5;
491 }
492 }
493
494 /*
495 * Watchdog timer handler.
496 * [ifnet interface function]
497 */
498 void
499 epic_watchdog(ifp)
500 struct ifnet *ifp;
501 {
502 struct epic_softc *sc = ifp->if_softc;
503
/*
 * A transmission did not complete within if_timer seconds (set to 5
 * in epic_start()).  Count it as an output error and reinitialize
 * the chip to recover.
 */
504 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
505 ifp->if_oerrors++;
506
507 (void) epic_init(ifp);
508 }
509
510 /*
511 * Handle control requests from the operator.
512 * [ifnet interface function]
513 */
514 int
515 epic_ioctl(ifp, cmd, data)
516 struct ifnet *ifp;
517 u_long cmd;
518 caddr_t data;
519 {
520 struct epic_softc *sc = ifp->if_softc;
521 struct ifreq *ifr = (struct ifreq *)data;
522 int s, error;
523
/* Block network interrupts while interface state is manipulated. */
524 s = splnet();
525
526 switch (cmd) {
527 case SIOCSIFMEDIA:
528 case SIOCGIFMEDIA:
/* Media get/set requests are delegated entirely to the ifmedia layer. */
529 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
530 break;
531
/* Everything else goes through the generic Ethernet ioctl handler. */
532 default:
533 error = ether_ioctl(ifp, cmd, data);
534 if (error == ENETRESET) {
535 /*
536 * Multicast list has changed; set the hardware filter
537 * accordingly. Update our idea of the current media;
538 * epic_set_mchash() needs to know what it is.
539 */
540 mii_pollstat(&sc->sc_mii);
541 epic_set_mchash(sc);
542 error = 0;
543 }
544 break;
545 }
546
547 splx(s);
548 return (error);
549 }
550
551 /*
552 * Interrupt handler.
553 */
554 int
555 epic_intr(arg)
556 void *arg;
557 {
558 struct epic_softc *sc = arg;
559 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
560 struct epic_rxdesc *rxd;
561 struct epic_txdesc *txd;
562 struct epic_descsoft *ds;
563 struct mbuf *m;
564 u_int32_t intstat;
565 int i, len, claimed = 0;
566
/* Loop back here after each pass until no interrupt is pending. */
567 top:
568 /*
569 * Get the interrupt status from the EPIC.
570 */
571 intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
572 if ((intstat & INTSTAT_INT_ACTV) == 0)
573 return (claimed);
574
575 claimed = 1;
576
577 /*
578 * Acknowledge the interrupt.
579 */
580 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
581 intstat & INTMASK);
582
583 /*
584 * Check for receive interrupts.
585 */
586 if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
587 for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
588 rxd = EPIC_CDRX(sc, i);
589 ds = EPIC_DSRX(sc, i);
590
591 EPIC_CDRXSYNC(sc, i,
592 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
593
/* OWNER still set means the chip has not filled this descriptor yet. */
594 if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
595 /*
596 * We have processed all of the
597 * receive buffers.
598 */
599 break;
600 }
601
602 /*
603 * Make sure the packet arrived intact. If an error
604 * occurred, update stats and reset the descriptor.
605 * The buffer will be reused the next time the
606 * descriptor comes up in the ring.
607 */
608 if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
609 if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
610 printf("%s: CRC error\n",
611 sc->sc_dev.dv_xname);
612 if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
613 printf("%s: alignment error\n",
614 sc->sc_dev.dv_xname);
615 ifp->if_ierrors++;
616 EPIC_INIT_RXDESC(sc, i);
617 continue;
618 }
619
620 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
621 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
622
623 /*
624 * The EPIC includes the CRC with every packet.
625 */
626 len = rxd->er_rxlength;
627
628 if (len < sizeof(struct ether_header)) {
629 /*
630 * Runt packet; drop it now.
631 */
632 ifp->if_ierrors++;
633 EPIC_INIT_RXDESC(sc, i);
634 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
635 ds->ds_dmamap->dm_mapsize,
636 BUS_DMASYNC_PREREAD);
637 continue;
638 }
639
640 /*
641 * If the packet is small enough to fit in a
642 * single header mbuf, allocate one and copy
643 * the data into it. This greatly reduces
644 * memory consumption when we receive lots
645 * of small packets.
646 *
647 * Otherwise, we add a new buffer to the receive
648 * chain. If this fails, we drop the packet and
649 * recycle the old buffer.
650 */
651 if (epic_copy_small != 0 && len <= MHLEN) {
652 MGETHDR(m, M_DONTWAIT, MT_DATA);
653 if (m == NULL)
654 goto dropit;
655 memcpy(mtod(m, caddr_t),
656 mtod(ds->ds_mbuf, caddr_t), len);
657 EPIC_INIT_RXDESC(sc, i);
658 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
659 ds->ds_dmamap->dm_mapsize,
660 BUS_DMASYNC_PREREAD);
661 } else {
662 m = ds->ds_mbuf;
663 if (epic_add_rxbuf(sc, i) != 0) {
664 dropit:
665 ifp->if_ierrors++;
666 EPIC_INIT_RXDESC(sc, i);
667 bus_dmamap_sync(sc->sc_dmat,
668 ds->ds_dmamap, 0,
669 ds->ds_dmamap->dm_mapsize,
670 BUS_DMASYNC_PREREAD);
671 continue;
672 }
673 }
674
/* Tell upper layers the trailing 4-byte FCS is still attached. */
675 m->m_flags |= M_HASFCS;
676 m->m_pkthdr.rcvif = ifp;
677 m->m_pkthdr.len = m->m_len = len;
678
679 #if NBPFILTER > 0
680 /*
681 * Pass this up to any BPF listeners, but only
682 * pass it up the stack if its for us.
683 */
684 if (ifp->if_bpf)
685 bpf_mtap(ifp->if_bpf, m);
686 #endif
687
688 /* Pass it on. */
689 (*ifp->if_input)(ifp, m);
690 ifp->if_ipackets++;
691 }
692
693 /* Update the receive pointer. */
694 sc->sc_rxptr = i;
695
696 /*
697 * Check for receive queue underflow.
698 */
699 if (intstat & INTSTAT_RQE) {
700 printf("%s: receiver queue empty\n",
701 sc->sc_dev.dv_xname);
702 /*
703 * Ring is already built; just restart the
704 * receiver.
705 */
706 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
707 EPIC_CDRXADDR(sc, sc->sc_rxptr));
708 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
709 COMMAND_RXQUEUED | COMMAND_START_RX);
710 }
711 }
712
713 /*
714 * Check for transmission complete interrupts.
715 */
716 if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
717 ifp->if_flags &= ~IFF_OACTIVE;
/* Reclaim finished descriptors, oldest (txdirty) first. */
718 for (i = sc->sc_txdirty; sc->sc_txpending != 0;
719 i = EPIC_NEXTTX(i), sc->sc_txpending--) {
720 txd = EPIC_CDTX(sc, i);
721 ds = EPIC_DSTX(sc, i);
722
723 EPIC_CDTXSYNC(sc, i,
724 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
725
/* OWNER still set means the chip has not sent this one yet; stop. */
726 if (txd->et_txstatus & ET_TXSTAT_OWNER)
727 break;
728
729 EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);
730
731 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
732 0, ds->ds_dmamap->dm_mapsize,
733 BUS_DMASYNC_POSTWRITE);
734 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
735 m_freem(ds->ds_mbuf);
736 ds->ds_mbuf = NULL;
737
738 /*
739 * Check for errors and collisions.
740 */
741 if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
742 ifp->if_oerrors++;
743 else
744 ifp->if_opackets++;
745 ifp->if_collisions +=
746 TXSTAT_COLLISIONS(txd->et_txstatus);
747 if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
748 printf("%s: lost carrier\n",
749 sc->sc_dev.dv_xname);
750 }
751
752 /* Update the dirty transmit buffer pointer. */
753 sc->sc_txdirty = i;
754
755 /*
756 * Cancel the watchdog timer if there are no pending
757 * transmissions.
758 */
759 if (sc->sc_txpending == 0)
760 ifp->if_timer = 0;
761
762 /*
763 * Kick the transmitter after a DMA underrun.
764 */
765 if (intstat & INTSTAT_TXU) {
766 printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
767 bus_space_write_4(sc->sc_st, sc->sc_sh,
768 EPIC_COMMAND, COMMAND_TXUGO);
769 if (sc->sc_txpending)
770 bus_space_write_4(sc->sc_st, sc->sc_sh,
771 EPIC_COMMAND, COMMAND_TXQUEUED);
772 }
773
774 /*
775 * Try to get more packets going.
776 */
777 epic_start(ifp);
778 }
779
780 /*
781 * Check for fatal interrupts.
782 */
783 if (intstat & INTSTAT_FATAL_INT) {
784 if (intstat & INTSTAT_PTA)
785 printf("%s: PCI target abort error\n",
786 sc->sc_dev.dv_xname);
787 else if (intstat & INTSTAT_PMA)
788 printf("%s: PCI master abort error\n",
789 sc->sc_dev.dv_xname);
790 else if (intstat & INTSTAT_APE)
791 printf("%s: PCI address parity error\n",
792 sc->sc_dev.dv_xname);
793 else if (intstat & INTSTAT_DPE)
794 printf("%s: PCI data parity error\n",
795 sc->sc_dev.dv_xname);
796 else
797 printf("%s: unknown fatal error\n",
798 sc->sc_dev.dv_xname);
/* Fatal PCI error: the only recovery is a full reinitialization. */
799 (void) epic_init(ifp);
800 }
801
802 /*
803 * Check for more interrupts.
804 */
805 goto top;
806 }
807
808 /*
809 * One second timer, used to tick the MII.
810 */
811 void
812 epic_tick(arg)
813 void *arg;
814 {
815 struct epic_softc *sc = arg;
816 int s;
817
/* mii_tick() must run at splnet() like the rest of the MII code. */
818 s = splnet();
819 mii_tick(&sc->sc_mii);
820 splx(s);
821
/* Re-arm ourselves one second (hz ticks) from now. */
822 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
823 }
824
825 /*
826 * Fixup the clock source on the EPIC.
827 */
828 void
829 epic_fixup_clock_source(sc)
830 struct epic_softc *sc;
831 {
832 int i;
833
834 /*
835 * According to SMC Application Note 7-15, the EPIC's clock
836 * source is incorrect following a reset. This manifests itself
837 * as failure to recognize when host software has written to
838 * a register on the EPIC. The appnote recommends issuing at
839 * least 16 consecutive writes to the CLOCK TEST bit to correctly
840 * configure the clock source.
841 */
842 for (i = 0; i < 16; i++)
843 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
844 TEST_CLOCKTEST);
845 }
846
847 /*
848 * Perform a soft reset on the EPIC.
849 */
850 void
851 epic_reset(sc)
852 struct epic_softc *sc;
853 {
854
/* Fix the clock source first so the chip notices our register writes. */
855 epic_fixup_clock_source(sc);
856
/* Clear GENCTL, then issue a soft reset, with settle delays between. */
857 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
858 delay(100);
859 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
860 delay(100);
861
/* The reset trashes the clock source again; fix it up once more. */
862 epic_fixup_clock_source(sc);
863 }
864
865 /*
866 * Initialize the interface. Must be called at splnet().
867 */
868 int
869 epic_init(ifp)
870 struct ifnet *ifp;
871 {
872 struct epic_softc *sc = ifp->if_softc;
873 bus_space_tag_t st = sc->sc_st;
874 bus_space_handle_t sh = sc->sc_sh;
/* Current station address, taken from the link-layer sockaddr. */
875 u_int8_t *enaddr = LLADDR(ifp->if_sadl);
876 struct epic_txdesc *txd;
877 struct epic_descsoft *ds;
878 u_int32_t genctl, reg0;
879 int i, error = 0;
880
881 /*
882 * Cancel any pending I/O.
883 */
884 epic_stop(ifp, 0);
885
886 /*
887 * Reset the EPIC to a known state.
888 */
889 epic_reset(sc);
890
891 /*
892 * Magical mystery initialization.
893 */
894 bus_space_write_4(st, sh, EPIC_TXTEST, 0);
895
896 /*
897 * Initialize the EPIC genctl register:
898 *
899 * - 64 byte receive FIFO threshold
900 * - automatic advance to next receive frame
901 */
902 genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
903 #if BYTE_ORDER == BIG_ENDIAN
904 genctl |= GENCTL_BIG_ENDIAN;
905 #endif
906 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
907
908 /*
909 * Reset the MII bus and PHY.
910 */
/* Preserve NVCTL while toggling GPIO1/GPOE1 and the PHY reset bit. */
911 reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
912 bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
913 bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
914 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
915 delay(100);
916 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
917 delay(100);
918 bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
919
920 /*
921 * Initialize Ethernet address.
922 */
/* The LAN registers take the address two bytes at a time, low byte first. */
923 reg0 = enaddr[1] << 8 | enaddr[0];
924 bus_space_write_4(st, sh, EPIC_LAN0, reg0);
925 reg0 = enaddr[3] << 8 | enaddr[2];
926 bus_space_write_4(st, sh, EPIC_LAN1, reg0);
927 reg0 = enaddr[5] << 8 | enaddr[4];
928 bus_space_write_4(st, sh, EPIC_LAN2, reg0);
929
930 /*
931 * Initialize receive control. Remember the external buffer
932 * size setting.
933 */
934 reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
935 (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
936 reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
937 if (ifp->if_flags & IFF_PROMISC)
938 reg0 |= RXCON_PROMISCMODE;
939 bus_space_write_4(st, sh, EPIC_RXCON, reg0);
940
941 /* Set the current media. */
942 mii_mediachg(&sc->sc_mii);
943
944 /* Set up the multicast hash table. */
945 epic_set_mchash(sc);
946
947 /*
948 * Initialize the transmit descriptor ring. txlast is initialized
949 * to the end of the list so that it will wrap around to the first
950 * descriptor when the first packet is transmitted.
951 */
952 for (i = 0; i < EPIC_NTXDESC; i++) {
953 txd = EPIC_CDTX(sc, i);
954 memset(txd, 0, sizeof(struct epic_txdesc));
955 txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
956 txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
957 EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
958 }
959 sc->sc_txpending = 0;
960 sc->sc_txdirty = 0;
961 sc->sc_txlast = EPIC_NTXDESC - 1;
962
963 /*
964 * Initialize the receive descriptor ring.
965 */
966 for (i = 0; i < EPIC_NRXDESC; i++) {
967 ds = EPIC_DSRX(sc, i);
968 if (ds->ds_mbuf == NULL) {
969 if ((error = epic_add_rxbuf(sc, i)) != 0) {
970 printf("%s: unable to allocate or map rx "
971 "buffer %d error = %d\n",
972 sc->sc_dev.dv_xname, i, error);
973 /*
974 * XXX Should attempt to run with fewer receive
975 * XXX buffers instead of just failing.
976 */
977 epic_rxdrain(sc);
978 goto out;
979 }
980 }
981 }
982 sc->sc_rxptr = 0;
983
984 /*
985 * Initialize the interrupt mask and enable interrupts.
986 */
987 bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
988 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
989
990 /*
991 * Give the transmit and receive rings to the EPIC.
992 */
993 bus_space_write_4(st, sh, EPIC_PTCDAR,
994 EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
995 bus_space_write_4(st, sh, EPIC_PRCDAR,
996 EPIC_CDRXADDR(sc, sc->sc_rxptr));
997
998 /*
999 * Set the EPIC in motion.
1000 */
1001 bus_space_write_4(st, sh, EPIC_COMMAND,
1002 COMMAND_RXQUEUED | COMMAND_START_RX);
1003
1004 /*
1005 * ...all done!
1006 */
1007 ifp->if_flags |= IFF_RUNNING;
1008 ifp->if_flags &= ~IFF_OACTIVE;
1009
1010 /*
1011 * Start the one second clock.
1012 */
1013 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
1014
1015 /*
1016 * Attempt to start output on the interface.
1017 */
1018 epic_start(ifp);
1019
/* Common exit: error is zero on success, non-zero on Rx setup failure. */
1020 out:
1021 if (error)
1022 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1023 return (error);
1024 }
1025
1026 /*
1027 * Drain the receive queue.
1028 */
1029 void
1030 epic_rxdrain(sc)
1031 struct epic_softc *sc;
1032 {
1033 struct epic_descsoft *ds;
1034 int i;
1035
1036 for (i = 0; i < EPIC_NRXDESC; i++) {
1037 ds = EPIC_DSRX(sc, i);
1038 if (ds->ds_mbuf != NULL) {
1039 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1040 m_freem(ds->ds_mbuf);
1041 ds->ds_mbuf = NULL;
1042 }
1043 }
1044 }
1045
1046 /*
1047 * Stop transmission on the interface.
1048 */
1049 void
1050 epic_stop(ifp, disable)
1051 struct ifnet *ifp;
1052 int disable;
1053 {
1054 struct epic_softc *sc = ifp->if_softc;
1055 bus_space_tag_t st = sc->sc_st;
1056 bus_space_handle_t sh = sc->sc_sh;
1057 struct epic_descsoft *ds;
1058 u_int32_t reg;
1059 int i;
1060
1061 /*
1062 * Stop the one second clock.
1063 */
1064 callout_stop(&sc->sc_mii_callout);
1065
1066 /* Down the MII. */
1067 mii_down(&sc->sc_mii);
1068
1069 /* Paranoia... */
1070 epic_fixup_clock_source(sc);
1071
1072 /*
1073 * Disable interrupts.
1074 */
/* Clear both the global interrupt enable and the per-source mask. */
1075 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1076 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1077 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1078
1079 /*
1080 * Stop the DMA engine and take the receiver off-line.
1081 */
1082 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1083 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1084
1085 /*
1086 * Release any queued transmit buffers.
1087 */
1088 for (i = 0; i < EPIC_NTXDESC; i++) {
1089 ds = EPIC_DSTX(sc, i);
1090 if (ds->ds_mbuf != NULL) {
1091 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1092 m_freem(ds->ds_mbuf);
1093 ds->ds_mbuf = NULL;
1094 }
1095 }
1096
/* When fully disabling (vs. a re-init), free the Rx buffers too. */
1097 if (disable)
1098 epic_rxdrain(sc);
1099
1100 /*
1101 * Mark the interface down and cancel the watchdog timer.
1102 */
1103 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1104 ifp->if_timer = 0;
1105 }
1106
1107 /*
1108 * Read the EPIC Serial EEPROM.
1109 */
1110 void
1111 epic_read_eeprom(sc, word, wordcnt, data)
1112 struct epic_softc *sc;
1113 int word, wordcnt;
1114 u_int16_t *data;
1115 {
/*
 * Bit-bangs the serial EEPROM interface through EPIC_EECTL: for each
 * of the wordcnt words starting at 'word', shift in the READ opcode
 * and a 6-bit address, then clock out 16 data bits into data[].
 */
1116 bus_space_tag_t st = sc->sc_st;
1117 bus_space_handle_t sh = sc->sc_sh;
1118 u_int16_t reg;
1119 int i, x;
1120
/* Busy-wait (no timeout) until the EEPROM signals ready. */
1121 #define EEPROM_WAIT_READY(st, sh) \
1122 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1123 /* nothing */
1124
1125 /*
1126 * Enable the EEPROM.
1127 */
1128 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1129 EEPROM_WAIT_READY(st, sh);
1130
1131 for (i = 0; i < wordcnt; i++) {
1132 /* Send CHIP SELECT for one clock tick. */
1133 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1134 EEPROM_WAIT_READY(st, sh);
1135
/* Each bit is presented on EEDI, then clocked with an EESK pulse. */
1136 /* Shift in the READ opcode. */
1137 for (x = 3; x > 0; x--) {
1138 reg = EECTL_ENABLE|EECTL_EECS;
1139 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1140 reg |= EECTL_EEDI;
1141 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1142 EEPROM_WAIT_READY(st, sh);
1143 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1144 EEPROM_WAIT_READY(st, sh);
1145 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1146 EEPROM_WAIT_READY(st, sh);
1147 }
1148
1149 /* Shift in address. */
1150 for (x = 6; x > 0; x--) {
1151 reg = EECTL_ENABLE|EECTL_EECS;
1152 if ((word + i) & (1 << (x - 1)))
1153 reg |= EECTL_EEDI;
1154 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1155 EEPROM_WAIT_READY(st, sh);
1156 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1157 EEPROM_WAIT_READY(st, sh);
1158 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1159 EEPROM_WAIT_READY(st, sh);
1160 }
1161
/* Read 16 data bits from EEDO, most significant bit first. */
1162 /* Shift out data. */
1163 reg = EECTL_ENABLE|EECTL_EECS;
1164 data[i] = 0;
1165 for (x = 16; x > 0; x--) {
1166 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1167 EEPROM_WAIT_READY(st, sh);
1168 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1169 data[i] |= (1 << (x - 1));
1170 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1171 EEPROM_WAIT_READY(st, sh);
1172 }
1173
1174 /* Clear CHIP SELECT. */
1175 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1176 EEPROM_WAIT_READY(st, sh);
1177 }
1178
1179 /*
1180 * Disable the EEPROM.
1181 */
1182 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1183
1184 #undef EEPROM_WAIT_READY
1185 }
1186
1187 /*
1188 * Add a receive buffer to the indicated descriptor.
1189 */
1190 int
1191 epic_add_rxbuf(sc, idx)
1192 struct epic_softc *sc;
1193 int idx;
1194 {
1195 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1196 struct mbuf *m;
1197 int error;
1198
1199 MGETHDR(m, M_DONTWAIT, MT_DATA);
1200 if (m == NULL)
1201 return (ENOBUFS);
1202
1203 MCLGET(m, M_DONTWAIT);
1204 if ((m->m_flags & M_EXT) == 0) {
1205 m_freem(m);
1206 return (ENOBUFS);
1207 }
1208
1209 if (ds->ds_mbuf != NULL)
1210 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1211
1212 ds->ds_mbuf = m;
1213
1214 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1215 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1216 if (error) {
1217 printf("%s: can't load rx DMA map %d, error = %d\n",
1218 sc->sc_dev.dv_xname, idx, error);
1219 panic("epic_add_rxbuf"); /* XXX */
1220 }
1221
1222 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1223 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1224
1225 EPIC_INIT_RXDESC(sc, idx);
1226
1227 return (0);
1228 }
1229
1230 /*
1231 * Set the EPIC multicast hash table.
1232 *
1233 * NOTE: We rely on a recently-updated mii_media_active here!
1234 */
1235 void
1236 epic_set_mchash(sc)
1237 struct epic_softc *sc;
1238 {
1239 struct ethercom *ec = &sc->sc_ethercom;
1240 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1241 struct ether_multi *enm;
1242 struct ether_multistep step;
1243 u_int32_t hash, mchash[4];
1244
1245 /*
1246 * Set up the multicast address filter by passing all multicast
1247 * addresses through a CRC generator, and then using the low-order
1248 * 6 bits as an index into the 64 bit multicast hash table (only
1249 * the lower 16 bits of each 32 bit multicast hash register are
1250 * valid). The high order bits select the register, while the
1251 * rest of the bits select the bit within the register.
1252 */
1253
1254 if (ifp->if_flags & IFF_PROMISC)
1255 goto allmulti;
1256
1257 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1258 /* XXX hardware bug in 10Mbps mode. */
1259 goto allmulti;
1260 }
1261
1262 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1263
1264 ETHER_FIRST_MULTI(step, ec, enm);
1265 while (enm != NULL) {
1266 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1267 /*
1268 * We must listen to a range of multicast addresses.
1269 * For now, just accept all multicasts, rather than
1270 * trying to set only those filter bits needed to match
1271 * the range. (At this time, the only use of address
1272 * ranges is for IP multicast routing, for which the
1273 * range is big enough to require all bits set.)
1274 */
1275 goto allmulti;
1276 }
1277
1278 hash = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN) & 0x3f;
1279
1280 /* Set the corresponding bit in the hash table. */
1281 mchash[hash >> 4] |= 1 << (hash & 0xf);
1282
1283 ETHER_NEXT_MULTI(step, enm);
1284 }
1285
1286 ifp->if_flags &= ~IFF_ALLMULTI;
1287 goto sethash;
1288
1289 allmulti:
1290 ifp->if_flags |= IFF_ALLMULTI;
1291 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1292
1293 sethash:
1294 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1295 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1296 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1297 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1298 }
1299
1300 /*
1301 * Wait for the MII to become ready.
1302 */
1303 int
1304 epic_mii_wait(sc, rw)
1305 struct epic_softc *sc;
1306 u_int32_t rw;
1307 {
1308 int i;
1309
1310 for (i = 0; i < 50; i++) {
1311 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1312 == 0)
1313 break;
1314 delay(2);
1315 }
1316 if (i == 50) {
1317 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1318 return (1);
1319 }
1320
1321 return (0);
1322 }
1323
1324 /*
1325 * Read from the MII.
1326 */
1327 int
1328 epic_mii_read(self, phy, reg)
1329 struct device *self;
1330 int phy, reg;
1331 {
1332 struct epic_softc *sc = (struct epic_softc *)self;
1333
1334 if (epic_mii_wait(sc, MMCTL_WRITE))
1335 return (0);
1336
1337 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1338 MMCTL_ARG(phy, reg, MMCTL_READ));
1339
1340 if (epic_mii_wait(sc, MMCTL_READ))
1341 return (0);
1342
1343 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1344 MMDATA_MASK);
1345 }
1346
1347 /*
1348 * Write to the MII.
1349 */
1350 void
1351 epic_mii_write(self, phy, reg, val)
1352 struct device *self;
1353 int phy, reg, val;
1354 {
1355 struct epic_softc *sc = (struct epic_softc *)self;
1356
1357 if (epic_mii_wait(sc, MMCTL_WRITE))
1358 return;
1359
1360 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1361 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1362 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1363 }
1364
1365 /*
1366 * Callback from PHY when media changes.
1367 */
1368 void
1369 epic_statchg(self)
1370 struct device *self;
1371 {
1372 struct epic_softc *sc = (struct epic_softc *)self;
1373 u_int32_t txcon;
1374
1375 /*
1376 * Update loopback bits in TXCON to reflect duplex mode.
1377 */
1378 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1379 if (sc->sc_mii.mii_media_active & IFM_FDX)
1380 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1381 else
1382 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1383 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1384
1385 /*
1386 * There is a multicast filter bug in 10Mbps mode. Kick the
1387 * multicast filter in case the speed changed.
1388 */
1389 epic_set_mchash(sc);
1390 }
1391
1392 /*
1393 * Callback from ifmedia to request current media status.
1394 */
1395 void
1396 epic_mediastatus(ifp, ifmr)
1397 struct ifnet *ifp;
1398 struct ifmediareq *ifmr;
1399 {
1400 struct epic_softc *sc = ifp->if_softc;
1401
1402 mii_pollstat(&sc->sc_mii);
1403 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1404 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1405 }
1406
1407 /*
1408 * Callback from ifmedia to request new media setting.
1409 */
1410 int
1411 epic_mediachange(ifp)
1412 struct ifnet *ifp;
1413 {
1414 struct epic_softc *sc = ifp->if_softc;
1415
1416 if (ifp->if_flags & IFF_UP)
1417 mii_mediachg(&sc->sc_mii);
1418 return (0);
1419 }
1420