smc83c170.c revision 1.46 1 /* $NetBSD: smc83c170.c,v 1.46 2001/07/07 05:35:42 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "bpfilter.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/callout.h>
50 #include <sys/mbuf.h>
51 #include <sys/malloc.h>
52 #include <sys/kernel.h>
53 #include <sys/socket.h>
54 #include <sys/ioctl.h>
55 #include <sys/errno.h>
56 #include <sys/device.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #include <machine/bus.h>
70 #include <machine/intr.h>
71
72 #include <dev/mii/miivar.h>
73 #include <dev/mii/lxtphyreg.h>
74
75 #include <dev/ic/smc83c170reg.h>
76 #include <dev/ic/smc83c170var.h>
77
78 void epic_start __P((struct ifnet *));
79 void epic_watchdog __P((struct ifnet *));
80 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
81 int epic_init __P((struct ifnet *));
82 void epic_stop __P((struct ifnet *, int));
83
84 void epic_shutdown __P((void *));
85
86 void epic_reset __P((struct epic_softc *));
87 void epic_rxdrain __P((struct epic_softc *));
88 int epic_add_rxbuf __P((struct epic_softc *, int));
89 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
90 void epic_set_mchash __P((struct epic_softc *));
91 void epic_fixup_clock_source __P((struct epic_softc *));
92 int epic_mii_read __P((struct device *, int, int));
93 void epic_mii_write __P((struct device *, int, int, int));
94 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
95 void epic_tick __P((void *));
96
97 void epic_statchg __P((struct device *));
98 int epic_mediachange __P((struct ifnet *));
99 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
100
101 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
102 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
103
104 int epic_copy_small = 0;
105
/*
 * epic_attach:
 *
 *	Attach an EPIC interface to the system.
 *
 *	Allocates and maps the control data (descriptor rings and
 *	fraglists), creates the per-descriptor DMA maps, reads the
 *	station address and device name out of the serial EEPROM,
 *	probes the MII, and attaches the network interface.  On
 *	failure, everything acquired so far is released by falling
 *	through the fail_* labels in reverse order of acquisition.
 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error, miiflags;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}

	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  EEPROM words
	 * are little-endian: low byte first.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (6 words at EEPROM offset 0x2c),
	 * unpacked the same way.
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate and trim trailing spaces from the EEPROM name. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	miiflags = 0;
	if (sc->sc_hwflags & EPIC_HAS_MII_FIBER)
		miiflags |= MIIF_HAVEFIBER;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, miiflags);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer only a "none" media choice. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (sc->sc_hwflags & EPIC_HAS_BNC) {
		/* use the next free media instance */
		sc->sc_serinst = sc->sc_mii.mii_instance++;
		ifmedia_add(&sc->sc_mii.mii_media,
			    IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0,
					 sc->sc_serinst),
			    0, NULL);
		printf("%s: 10base2/BNC\n", sc->sc_dev.dv_xname);
	} else
		sc->sc_serinst = -1;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;
	ifp->if_init = epic_init;
	ifp->if_stop = epic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
313
314 /*
315 * Shutdown hook. Make sure the interface is stopped at reboot.
316 */
317 void
318 epic_shutdown(arg)
319 void *arg;
320 {
321 struct epic_softc *sc = arg;
322
323 epic_stop(&sc->sc_ethercom.ec_if, 1);
324 }
325
/*
 * epic_start:
 *
 *	Start packet transmission on the interface.
 *	[ifnet interface function]
 *
 *	Dequeues packets from ifp->if_snd, loads each into a transmit
 *	DMA map (copying into a fresh mbuf if the direct load fails),
 *	fills in the fraglist and transmit descriptor, and finally hands
 *	the first new descriptor to the chip and kicks the transmitter.
 *	Called at splnet().
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.  POLL (rather than
		 * DEQUEUE) so the packet stays queued if we bail out
		 * on an allocation failure below.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/* Commit: remove the packet from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; free the original chain. */
			m_freem(m0);
			m0 = m;
		}

		/* Initialize the fraglist. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves.
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
502
503 /*
504 * Watchdog timer handler.
505 * [ifnet interface function]
506 */
507 void
508 epic_watchdog(ifp)
509 struct ifnet *ifp;
510 {
511 struct epic_softc *sc = ifp->if_softc;
512
513 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
514 ifp->if_oerrors++;
515
516 (void) epic_init(ifp);
517 }
518
519 /*
520 * Handle control requests from the operator.
521 * [ifnet interface function]
522 */
523 int
524 epic_ioctl(ifp, cmd, data)
525 struct ifnet *ifp;
526 u_long cmd;
527 caddr_t data;
528 {
529 struct epic_softc *sc = ifp->if_softc;
530 struct ifreq *ifr = (struct ifreq *)data;
531 int s, error;
532
533 s = splnet();
534
535 switch (cmd) {
536 case SIOCSIFMEDIA:
537 case SIOCGIFMEDIA:
538 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
539 break;
540
541 default:
542 error = ether_ioctl(ifp, cmd, data);
543 if (error == ENETRESET) {
544 /*
545 * Multicast list has changed; set the hardware filter
546 * accordingly. Update our idea of the current media;
547 * epic_set_mchash() needs to know what it is.
548 */
549 mii_pollstat(&sc->sc_mii);
550 epic_set_mchash(sc);
551 error = 0;
552 }
553 break;
554 }
555
556 splx(s);
557 return (error);
558 }
559
/*
 * epic_intr:
 *
 *	Interrupt handler.  Loops (via "goto top") as long as the chip
 *	reports an active interrupt, servicing receive completions,
 *	transmit completions, queue-empty/underrun conditions, and
 *	fatal PCI errors.  Returns nonzero if the interrupt was ours.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt by writing back the bits we handle.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the ring from rxptr until we hit a chip-owned desc. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet.
			 */
			len = rxd->er_rxlength;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			/* FCS is still attached; let the stack strip it. */
			m->m_flags |= M_HASFCS;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if its for us.
			 */
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reap completed descriptors from txdirty forward. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		/* Reinitialize the chip to recover. */
		(void) epic_init(ifp);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
816
817 /*
818 * One second timer, used to tick the MII.
819 */
820 void
821 epic_tick(arg)
822 void *arg;
823 {
824 struct epic_softc *sc = arg;
825 int s;
826
827 s = splnet();
828 mii_tick(&sc->sc_mii);
829 splx(s);
830
831 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
832 }
833
834 /*
835 * Fixup the clock source on the EPIC.
836 */
837 void
838 epic_fixup_clock_source(sc)
839 struct epic_softc *sc;
840 {
841 int i;
842
843 /*
844 * According to SMC Application Note 7-15, the EPIC's clock
845 * source is incorrect following a reset. This manifests itself
846 * as failure to recognize when host software has written to
847 * a register on the EPIC. The appnote recommends issuing at
848 * least 16 consecutive writes to the CLOCK TEST bit to correctly
849 * configure the clock source.
850 */
851 for (i = 0; i < 16; i++)
852 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
853 TEST_CLOCKTEST);
854 }
855
/*
 * epic_reset:
 *
 *	Perform a soft reset on the EPIC.  The clock source must be
 *	fixed up both before and after the reset (see
 *	epic_fixup_clock_source()), otherwise register writes may be
 *	lost.
 */
void
epic_reset(sc)
	struct epic_softc *sc;
{

	epic_fixup_clock_source(sc);

	/* Clear GENCTL, then pulse the soft-reset bit. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
	delay(100);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
	delay(100);

	epic_fixup_clock_source(sc);
}
873
/*
 * epic_init:
 *
 *	Initialize the interface.  Must be called at splnet().
 *	[ifnet interface function]
 *
 *	Stops any pending I/O, resets the chip, programs the station
 *	address and receive filter, builds the transmit and receive
 *	descriptor rings, enables interrupts, and starts the receiver
 *	and the one-second MII tick.  Returns 0 on success or an errno
 *	if receive buffers could not be allocated.
 */
int
epic_init(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(ifp, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(1000);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address, two bytes per LAN register.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	epic_mediachange(ifp);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any descriptors that don't already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1034
1035 /*
1036 * Drain the receive queue.
1037 */
1038 void
1039 epic_rxdrain(sc)
1040 struct epic_softc *sc;
1041 {
1042 struct epic_descsoft *ds;
1043 int i;
1044
1045 for (i = 0; i < EPIC_NRXDESC; i++) {
1046 ds = EPIC_DSRX(sc, i);
1047 if (ds->ds_mbuf != NULL) {
1048 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1049 m_freem(ds->ds_mbuf);
1050 ds->ds_mbuf = NULL;
1051 }
1052 }
1053 }
1054
/*
 * epic_stop:
 *
 *	Stop transmission on the interface.
 *	[ifnet interface function]
 *
 *	Stops the MII tick, disables interrupts, halts the DMA engines,
 *	and frees all queued transmit buffers.  If "disable" is nonzero,
 *	the receive buffers are drained as well.
 */
void
epic_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct epic_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia... make sure register writes will be seen. */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		epic_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1115
/*
 * epic_read_eeprom:
 *
 *	Read "wordcnt" 16-bit words starting at word offset "word" from
 *	the EPIC's serial EEPROM into "data".
 *
 *	The EEPROM is accessed by bit-banging a Microwire-style serial
 *	protocol through the EECTL register: for each word we raise chip
 *	select, clock out the 3-bit READ opcode and 6-bit address (MSB
 *	first, with an EESK clock pulse per bit), then clock in 16 data
 *	bits (MSB first) from EEDO.  Each register write is followed by
 *	a busy-wait for EECTL_EERDY.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/* Spin until the EEPROM interface reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out 16 data bits, MSB first. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1195
1196 /*
1197 * Add a receive buffer to the indicated descriptor.
1198 */
1199 int
1200 epic_add_rxbuf(sc, idx)
1201 struct epic_softc *sc;
1202 int idx;
1203 {
1204 struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
1205 struct mbuf *m;
1206 int error;
1207
1208 MGETHDR(m, M_DONTWAIT, MT_DATA);
1209 if (m == NULL)
1210 return (ENOBUFS);
1211
1212 MCLGET(m, M_DONTWAIT);
1213 if ((m->m_flags & M_EXT) == 0) {
1214 m_freem(m);
1215 return (ENOBUFS);
1216 }
1217
1218 if (ds->ds_mbuf != NULL)
1219 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1220
1221 ds->ds_mbuf = m;
1222
1223 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1224 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1225 if (error) {
1226 printf("%s: can't load rx DMA map %d, error = %d\n",
1227 sc->sc_dev.dv_xname, idx, error);
1228 panic("epic_add_rxbuf"); /* XXX */
1229 }
1230
1231 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1232 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1233
1234 EPIC_INIT_RXDESC(sc, idx);
1235
1236 return (0);
1237 }
1238
1239 /*
1240 * Set the EPIC multicast hash table.
1241 *
1242 * NOTE: We rely on a recently-updated mii_media_active here!
1243 */
1244 void
1245 epic_set_mchash(sc)
1246 struct epic_softc *sc;
1247 {
1248 struct ethercom *ec = &sc->sc_ethercom;
1249 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1250 struct ether_multi *enm;
1251 struct ether_multistep step;
1252 u_int32_t hash, mchash[4];
1253
1254 /*
1255 * Set up the multicast address filter by passing all multicast
1256 * addresses through a CRC generator, and then using the low-order
1257 * 6 bits as an index into the 64 bit multicast hash table (only
1258 * the lower 16 bits of each 32 bit multicast hash register are
1259 * valid). The high order bits select the register, while the
1260 * rest of the bits select the bit within the register.
1261 */
1262
1263 if (ifp->if_flags & IFF_PROMISC)
1264 goto allmulti;
1265
1266 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1267 /* XXX hardware bug in 10Mbps mode. */
1268 goto allmulti;
1269 }
1270
1271 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1272
1273 ETHER_FIRST_MULTI(step, ec, enm);
1274 while (enm != NULL) {
1275 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1276 /*
1277 * We must listen to a range of multicast addresses.
1278 * For now, just accept all multicasts, rather than
1279 * trying to set only those filter bits needed to match
1280 * the range. (At this time, the only use of address
1281 * ranges is for IP multicast routing, for which the
1282 * range is big enough to require all bits set.)
1283 */
1284 goto allmulti;
1285 }
1286
1287 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1288 hash >>= 26;
1289
1290 /* Set the corresponding bit in the hash table. */
1291 mchash[hash >> 4] |= 1 << (hash & 0xf);
1292
1293 ETHER_NEXT_MULTI(step, enm);
1294 }
1295
1296 ifp->if_flags &= ~IFF_ALLMULTI;
1297 goto sethash;
1298
1299 allmulti:
1300 ifp->if_flags |= IFF_ALLMULTI;
1301 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1302
1303 sethash:
1304 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1305 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1306 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1307 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1308 }
1309
1310 /*
1311 * Wait for the MII to become ready.
1312 */
1313 int
1314 epic_mii_wait(sc, rw)
1315 struct epic_softc *sc;
1316 u_int32_t rw;
1317 {
1318 int i;
1319
1320 for (i = 0; i < 50; i++) {
1321 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1322 == 0)
1323 break;
1324 delay(2);
1325 }
1326 if (i == 50) {
1327 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1328 return (1);
1329 }
1330
1331 return (0);
1332 }
1333
1334 /*
1335 * Read from the MII.
1336 */
1337 int
1338 epic_mii_read(self, phy, reg)
1339 struct device *self;
1340 int phy, reg;
1341 {
1342 struct epic_softc *sc = (struct epic_softc *)self;
1343
1344 if (epic_mii_wait(sc, MMCTL_WRITE))
1345 return (0);
1346
1347 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1348 MMCTL_ARG(phy, reg, MMCTL_READ));
1349
1350 if (epic_mii_wait(sc, MMCTL_READ))
1351 return (0);
1352
1353 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1354 MMDATA_MASK);
1355 }
1356
1357 /*
1358 * Write to the MII.
1359 */
1360 void
1361 epic_mii_write(self, phy, reg, val)
1362 struct device *self;
1363 int phy, reg, val;
1364 {
1365 struct epic_softc *sc = (struct epic_softc *)self;
1366
1367 if (epic_mii_wait(sc, MMCTL_WRITE))
1368 return;
1369
1370 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1371 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1372 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1373 }
1374
1375 /*
1376 * Callback from PHY when media changes.
1377 */
1378 void
1379 epic_statchg(self)
1380 struct device *self;
1381 {
1382 struct epic_softc *sc = (struct epic_softc *)self;
1383 u_int32_t txcon, miicfg;
1384
1385 /*
1386 * Update loopback bits in TXCON to reflect duplex mode.
1387 */
1388 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1389 if (sc->sc_mii.mii_media_active & IFM_FDX)
1390 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1391 else
1392 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1393 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1394
1395 /* On some cards we need manualy set fullduplex led */
1396 if (sc->sc_hwflags & EPIC_DUPLEXLED_ON_694) {
1397 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1398 if (IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX)
1399 miicfg |= MIICFG_ENABLE;
1400 else
1401 miicfg &= ~MIICFG_ENABLE;
1402 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1403 }
1404
1405 /*
1406 * There is a multicast filter bug in 10Mbps mode. Kick the
1407 * multicast filter in case the speed changed.
1408 */
1409 epic_set_mchash(sc);
1410 }
1411
1412 /*
1413 * Callback from ifmedia to request current media status.
1414 */
1415 void
1416 epic_mediastatus(ifp, ifmr)
1417 struct ifnet *ifp;
1418 struct ifmediareq *ifmr;
1419 {
1420 struct epic_softc *sc = ifp->if_softc;
1421
1422 mii_pollstat(&sc->sc_mii);
1423 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1424 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1425 }
1426
1427 /*
1428 * Callback from ifmedia to request new media setting.
1429 */
1430 int
1431 epic_mediachange(ifp)
1432 struct ifnet *ifp;
1433 {
1434 struct epic_softc *sc = ifp->if_softc;
1435 struct mii_data *mii = &sc->sc_mii;
1436 struct ifmedia *ifm = &mii->mii_media;
1437 int media = ifm->ifm_cur->ifm_media;
1438 u_int32_t miicfg;
1439 struct mii_softc *miisc;
1440 int cfg;
1441
1442 if (!(ifp->if_flags & IFF_UP))
1443 return (0);
1444
1445 if (IFM_INST(media) != sc->sc_serinst) {
1446 /* If we're not selecting serial interface, select MII mode */
1447 #ifdef EPICMEDIADEBUG
1448 printf("%s: parallel mode\n", ifp->if_xname);
1449 #endif
1450 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1451 miicfg &= ~MIICFG_SERMODEENA;
1452 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1453 }
1454
1455 mii_mediachg(mii);
1456
1457 if (IFM_INST(media) == sc->sc_serinst) {
1458 /* select serial interface */
1459 #ifdef EPICMEDIADEBUG
1460 printf("%s: serial mode\n", ifp->if_xname);
1461 #endif
1462 miicfg = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG);
1463 miicfg |= (MIICFG_SERMODEENA | MIICFG_ENABLE);
1464 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MIICFG, miicfg);
1465
1466 /* There is no driver to fill this */
1467 mii->mii_media_active = media;
1468 mii->mii_media_status = 0;
1469
1470 epic_statchg(&sc->sc_dev);
1471 return (0);
1472 }
1473
1474 /* Lookup selected PHY */
1475 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1476 miisc = LIST_NEXT(miisc, mii_list)) {
1477 if (IFM_INST(media) == miisc->mii_inst)
1478 break;
1479 }
1480 if (!miisc) {
1481 printf("epic_mediachange: can't happen\n"); /* ??? panic */
1482 return (0);
1483 }
1484 #ifdef EPICMEDIADEBUG
1485 printf("%s: using phy %s\n", ifp->if_xname,
1486 miisc->mii_dev.dv_xname);
1487 #endif
1488
1489 if (miisc->mii_flags & MIIF_HAVEFIBER) {
1490 /* XXX XXX assume it's a Level1 - should check */
1491
1492 /* We have to powerup fiber tranceivers */
1493 cfg = PHY_READ(miisc, MII_LXTPHY_CONFIG);
1494 if (IFM_SUBTYPE(media) == IFM_100_FX) {
1495 #ifdef EPICMEDIADEBUG
1496 printf("%s: power up fiber\n", ifp->if_xname);
1497 #endif
1498 cfg |= (CONFIG_LEDC1 | CONFIG_LEDC0);
1499 } else {
1500 #ifdef EPICMEDIADEBUG
1501 printf("%s: power down fiber\n", ifp->if_xname);
1502 #endif
1503 cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
1504 }
1505 PHY_WRITE(miisc, MII_LXTPHY_CONFIG, cfg);
1506 }
1507
1508 return (0);
1509 }
1510