smc83c170.c revision 1.32.4.2 1 /* $NetBSD: smc83c170.c,v 1.32.4.2 2000/12/31 20:15:01 jhawk Exp $ */
2
3 /*-
4 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/mbuf.h>
53 #include <sys/malloc.h>
54 #include <sys/kernel.h>
55 #include <sys/socket.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/device.h>
59
60 #include <net/if.h>
61 #include <net/if_dl.h>
62 #include <net/if_media.h>
63 #include <net/if_ether.h>
64
65 #if NBPFILTER > 0
66 #include <net/bpf.h>
67 #endif
68
69 #ifdef INET
70 #include <netinet/in.h>
71 #include <netinet/if_inarp.h>
72 #endif
73
74 #ifdef NS
75 #include <netns/ns.h>
76 #include <netns/ns_if.h>
77 #endif
78
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81
82 #include <dev/mii/miivar.h>
83
84 #include <dev/ic/smc83c170reg.h>
85 #include <dev/ic/smc83c170var.h>
86
87 void epic_start __P((struct ifnet *));
88 void epic_watchdog __P((struct ifnet *));
89 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
90
91 void epic_shutdown __P((void *));
92
93 void epic_reset __P((struct epic_softc *));
94 int epic_init __P((struct epic_softc *));
95 void epic_rxdrain __P((struct epic_softc *));
96 void epic_stop __P((struct epic_softc *, int));
97 int epic_add_rxbuf __P((struct epic_softc *, int));
98 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
99 void epic_set_mchash __P((struct epic_softc *));
100 void epic_fixup_clock_source __P((struct epic_softc *));
101 int epic_mii_read __P((struct device *, int, int));
102 void epic_mii_write __P((struct device *, int, int, int));
103 int epic_mii_wait __P((struct epic_softc *, u_int32_t));
104 void epic_tick __P((void *));
105
106 void epic_statchg __P((struct device *));
107 int epic_mediachange __P((struct ifnet *));
108 void epic_mediastatus __P((struct ifnet *, struct ifmediareq *));
109
110 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
111 INTSTAT_TXC | INTSTAT_RXE | INTSTAT_RQE | INTSTAT_RCC)
112
113 int epic_copy_small = 0;
114
115 /*
116 * Attach an EPIC interface to the system.
117 */
void
epic_attach(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
	u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];

	callout_init(&sc->sc_mii_callout);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  The unwind labels at the bottom (fail_0..fail_5)
	 * release these resources in reverse order of acquisition.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct epic_control_data), 1,
	    sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct epic_control_data), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps (one per tx descriptor,
	 * each allowing up to EPIC_NFRAGS segments).
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps (one contiguous cluster each).
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &EPIC_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		EPIC_DSRX(sc, i)->ds_mbuf = NULL;
	}


	/*
	 * Bring the chip out of low-power mode and reset it to a known state.
	 */
	bus_space_write_4(st, sh, EPIC_GENCTL, 0);
	epic_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.  The EEPROM returns
	 * 16-bit words; split each into two bytes, low byte first.
	 */
	epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
	for (i = 0; i < sizeof(myea)/ sizeof(myea[0]); i++) {
		enaddr[i * 2]     = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}

	/*
	 * ...and the device name (12 ASCII bytes at EEPROM word 0x2c).
	 */
	epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
	    mydevname);
	for (i = 0; i < sizeof(mydevname) / sizeof(mydevname[0]); i++) {
		devname[i * 2]     = mydevname[i] & 0xff;
		devname[i * 2 + 1] = mydevname[i] >> 8;
	}

	/* NUL-terminate and strip trailing spaces from the name. */
	devname[sizeof(mydevname)] = '\0';
	for (i = sizeof(mydevname) - 1; i >= 0; i--) {
		if (devname[i] == ' ')
			devname[i] = '\0';
		else
			break;
	}

	printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
	    devname, ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.  If no PHY
	 * is found, fall back to a "none" media instance.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epic_mii_read;
	sc->sc_mii.mii_writereg = epic_mii_write;
	sc->sc_mii.mii_statchg = epic_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, epic_mediachange,
	    epic_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* Fill in the ifnet and hook up our entry points. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epic_ioctl;
	ifp->if_start = epic_start;
	ifp->if_watchdog = epic_watchdog;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NBPFILTER > 0
	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
	    sizeof(struct ether_header));
#endif

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < EPIC_NRXDESC; i++) {
		if (EPIC_DSRX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSRX(sc, i)->ds_dmamap);
	}
 fail_4:
	for (i = 0; i < EPIC_NTXDESC; i++) {
		if (EPIC_DSTX(sc, i)->ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    EPIC_DSTX(sc, i)->ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct epic_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
308
309 /*
310 * Shutdown hook. Make sure the interface is stopped at reboot.
311 */
void
epic_shutdown(arg)
	void *arg;
{

	/* Halt the chip and release the receive buffers before reboot. */
	epic_stop((struct epic_softc *)arg, 1);
}
320
321 /*
322 * Start packet transmission on the interface.
323 * [ifnet interface function]
324 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	int error, firsttx, nexttx, opending, seg;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use, so we can tell below whether anything
	 * was actually enqueued.
	 */
	opending = sc->sc_txpending;
	firsttx = EPIC_NEXTTX(sc->sc_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = EPIC_CDTX(sc, nexttx);
		fr = EPIC_CDFL(sc, nexttx);
		ds = EPIC_DSTX(sc, nexttx);
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of frags, or we were
		 * short on resources.  In this case, we'll copy the
		 * packet into a single contiguous mbuf and try again.
		 * On any unrecoverable failure, the packet is put back
		 * on the send queue and we bail out of the loop.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
			/* Need a cluster if the packet won't fit in MHLEN. */
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					IF_PREPEND(&ifp->if_snd, m0);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = m;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m0, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				IF_PREPEND(&ifp->if_snd, m0);
				break;
			}
		}

		/* Initialize the fraglist from the loaded DMA segments. */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		EPIC_CDFLSYNC(sc, nexttx, BUS_DMASYNC_PREWRITE);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Fill in the transmit descriptor.  The EPIC doesn't
		 * auto-pad, so we have to do this ourselves by padding
		 * the length up to the Ethernet minimum (sans CRC).
		 */
		txd->et_control = ET_TXCTL_LASTDESC | ET_TXCTL_FRAGLIST;
		txd->et_txlength = max(m0->m_pkthdr.len,
		    ETHER_MIN_LEN - ETHER_CRC_LEN);

		/*
		 * If this is the first descriptor we're enqueueing,
		 * don't give it to the EPIC yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			txd->et_txstatus = 0;
		else
			txd->et_txstatus = ET_TXSTAT_OWNER;

		EPIC_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == EPIC_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		EPIC_CDTX(sc, sc->sc_txlast)->et_control |= ET_TXCTL_IAF;
		EPIC_CDTXSYNC(sc, sc->sc_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the EPIC now.
		 */
		EPIC_CDTX(sc, firsttx)->et_txstatus = ET_TXSTAT_OWNER;
		EPIC_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
496
497 /*
498 * Watchdog timer handler.
499 * [ifnet interface function]
500 */
501 void
502 epic_watchdog(ifp)
503 struct ifnet *ifp;
504 {
505 struct epic_softc *sc = ifp->if_softc;
506
507 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
508 ifp->if_oerrors++;
509
510 (void) epic_init(sc);
511 }
512
513 /*
514 * Handle control requests from the operator.
515 * [ifnet interface function]
516 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	/* Block network interrupts while we manipulate interface state. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if ((error = epic_init(sc)) != 0)
				break;
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: a null host part means "use our hardware
			 * address"; otherwise program the given one.
			 */
			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			error = epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			error = epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		/* Only reject MTUs larger than standard Ethernet. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc, 1);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = epic_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			error = epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  Update our idea of the current media;
			 * epic_set_mchash() needs to know what it is.
			 */
			mii_pollstat(&sc->sc_mii);
			epic_set_mchash(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
626
627 /*
628 * Interrupt handler.
629 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0;

 top:
	/*
	 * Get the interrupt status from the EPIC.  If nothing is
	 * pending, return whether we claimed a previous iteration.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt (write-1-to-clear the bits we handle).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RXE | INTSTAT_RQE)) {
		/* Walk the rx ring until we hit a descriptor the chip owns. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = EPIC_CDRX(sc, i);
			ds = EPIC_DSRX(sc, i);

			EPIC_CDRXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/*
			 * Make sure the packet arrived intact.  If an error
			 * occurred, update stats and reset the descriptor.
			 * The buffer will be reused the next time the
			 * descriptor comes up in the ring.
			 *
			 * NOTE(review): unlike the other recycle paths
			 * below, this one does not re-sync the map with
			 * BUS_DMASYNC_PREREAD -- confirm intentional.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				continue;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * The EPIC includes the CRC with every packet;
			 * trim it.
			 */
			len = rxd->er_rxlength - ETHER_CRC_LEN;

			if (len < sizeof(struct ether_header)) {
				/*
				 * Runt packet; drop it now.
				 */
				ifp->if_ierrors++;
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}

			/*
			 * If the packet is small enough to fit in a
			 * single header mbuf, allocate one and copy
			 * the data into it.  This greatly reduces
			 * memory consumption when we receive lots
			 * of small packets.
			 *
			 * Otherwise, we add a new buffer to the receive
			 * chain.  If this fails, we drop the packet and
			 * recycle the old buffer.
			 */
			if (epic_copy_small != 0 && len <= MHLEN) {
				MGETHDR(m, M_DONTWAIT, MT_DATA);
				if (m == NULL)
					goto dropit;
				memcpy(mtod(m, caddr_t),
				    mtod(ds->ds_mbuf, caddr_t), len);
				EPIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
			} else {
				m = ds->ds_mbuf;
				if (epic_add_rxbuf(sc, i) != 0) {
 dropit:
					ifp->if_ierrors++;
					EPIC_INIT_RXDESC(sc, i);
					bus_dmamap_sync(sc->sc_dmat,
					    ds->ds_dmamap, 0,
					    ds->ds_dmamap->dm_mapsize,
					    BUS_DMASYNC_PREREAD);
					continue;
				}
			}

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
			/*
			 * Pass this up to any BPF listeners, but only
			 * pass it up the stack if it's for us.  In
			 * promiscuous mode, drop unicast packets not
			 * addressed to us (and non-multicast) here.
			 */
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    memcmp(LLADDR(ifp->if_sadl),
					   eh->ether_dhost,
					   ETHER_ADDR_LEN) != 0 &&
				    ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
					m_freem(m);
					continue;
				}
			}
#endif /* NBPFILTER > 0 */

			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			ifp->if_ipackets++;
		}

		/* Update the receive pointer. */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    EPIC_CDRXADDR(sc, sc->sc_rxptr));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Reclaim completed descriptors starting at txdirty. */
		for (i = sc->sc_txdirty; sc->sc_txpending != 0;
		     i = EPIC_NEXTTX(i), sc->sc_txpending--) {
			txd = EPIC_CDTX(sc, i);
			ds = EPIC_DSTX(sc, i);

			EPIC_CDTXSYNC(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			/* Still owned by the chip: stop reclaiming. */
			if (txd->et_txstatus & ET_TXSTAT_OWNER)
				break;

			EPIC_CDFLSYNC(sc, i, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
			    0, ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST)
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
		}

		/* Update the dirty transmit buffer pointer. */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.  Reinitialize the chip to
	 * recover from any of them.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		if (intstat & INTSTAT_PTA)
			printf("%s: PCI target abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_PMA)
			printf("%s: PCI master abort error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_APE)
			printf("%s: PCI address parity error\n",
			    sc->sc_dev.dv_xname);
		else if (intstat & INTSTAT_DPE)
			printf("%s: PCI data parity error\n",
			    sc->sc_dev.dv_xname);
		else
			printf("%s: unknown fatal error\n",
			    sc->sc_dev.dv_xname);
		(void) epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
894
895 /*
896 * One second timer, used to tick the MII.
897 */
898 void
899 epic_tick(arg)
900 void *arg;
901 {
902 struct epic_softc *sc = arg;
903 int s;
904
905 s = splnet();
906 mii_tick(&sc->sc_mii);
907 splx(s);
908
909 callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);
910 }
911
912 /*
913 * Fixup the clock source on the EPIC.
914 */
915 void
916 epic_fixup_clock_source(sc)
917 struct epic_softc *sc;
918 {
919 int i;
920
921 /*
922 * According to SMC Application Note 7-15, the EPIC's clock
923 * source is incorrect following a reset. This manifests itself
924 * as failure to recognize when host software has written to
925 * a register on the EPIC. The appnote recommends issuing at
926 * least 16 consecutive writes to the CLOCK TEST bit to correctly
927 * configure the clock source.
928 */
929 for (i = 0; i < 16; i++)
930 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TEST,
931 TEST_CLOCKTEST);
932 }
933
934 /*
935 * Perform a soft reset on the EPIC.
936 */
937 void
938 epic_reset(sc)
939 struct epic_softc *sc;
940 {
941
942 epic_fixup_clock_source(sc);
943
944 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
945 delay(100);
946 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
947 delay(100);
948
949 epic_fixup_clock_source(sc);
950 }
951
952 /*
953 * Initialize the interface. Must be called at splnet().
954 */
int
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	u_int32_t genctl, reg0;
	int i, error = 0;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc, 0);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
#if BYTE_ORDER == BIG_ENDIAN
	genctl |= GENCTL_BIG_ENDIAN;
#endif
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  GPIO1 is raised around the PHY
	 * reset and NVCTL is restored afterwards.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address (programmed as three 16-bit
	 * words, low byte in the low half of each register).
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Set up the multicast hash table. */
	epic_set_mchash(sc);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd = EPIC_CDTX(sc, i);
		memset(txd, 0, sizeof(struct epic_txdesc));
		txd->et_bufaddr = EPIC_CDFLADDR(sc, i);
		txd->et_nextdesc = EPIC_CDTXADDR(sc, EPIC_NEXTTX(i));
		EPIC_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring, allocating buffers
	 * for any slot that does not already have one.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = EPIC_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = epic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				epic_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->sc_rxptr = 0;

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    EPIC_CDTXADDR(sc, EPIC_NEXTTX(sc->sc_txlast)));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    EPIC_CDRXADDR(sc, sc->sc_rxptr));

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Start the one second clock.
	 */
	callout_reset(&sc->sc_mii_callout, hz, epic_tick, sc);

	/*
	 * Attempt to start output on the interface.
	 */
	epic_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
1112
1113 /*
1114 * Drain the receive queue.
1115 */
1116 void
1117 epic_rxdrain(sc)
1118 struct epic_softc *sc;
1119 {
1120 struct epic_descsoft *ds;
1121 int i;
1122
1123 for (i = 0; i < EPIC_NRXDESC; i++) {
1124 ds = EPIC_DSRX(sc, i);
1125 if (ds->ds_mbuf != NULL) {
1126 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1127 m_freem(ds->ds_mbuf);
1128 ds->ds_mbuf = NULL;
1129 }
1130 }
1131 }
1132
1133 /*
1134 * Stop transmission on the interface.
1135 */
void
epic_stop(sc, drain)
	struct epic_softc *sc;
	int drain;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_mii_callout);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Paranoia...  (clock-source workaround, see appnote comment). */
	epic_fixup_clock_source(sc);

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = EPIC_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (drain) {
		/*
		 * Release the receive buffers.  (Skipped when the caller
		 * intends to reinitialize and reuse the rx ring.)
		 */
		epic_rxdrain(sc);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1197
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the EEPROM's serial interface through the EECTL register:
 * for each of `wordcnt' words starting at `word', a chip-select pulse
 * is issued, a 3-bit READ opcode and a 6-bit address are shifted in
 * MSB-first, and 16 data bits are clocked out MSB-first into data[i].
 * Each register write is followed by a busy-wait for EECTL_EERDY.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

/* Spin until the EEPROM interface reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first, one bit per
		   data-setup / clock-high / clock-low sequence. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out 16 data bits, MSB first: raise the clock,
		   then sample EEDO while lowering it. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT before the next word. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1277
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Allocates an mbuf header plus cluster, maps it for DMA, and
 * initializes the receive descriptor at `idx' to point at it.
 * Returns 0 on success, or ENOBUFS if no mbuf or cluster could be
 * allocated (in which case the descriptor's previous mbuf, if any,
 * is left untouched).
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_descsoft *ds = EPIC_DSRX(sc, idx);
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; without one the mbuf is useless here. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload the old mapping only once the new buffer is in hand. */
	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	/* Map the whole cluster, not just the frame data. */
	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * NOTE(review): load of a contiguous cluster with
		 * BUS_DMA_NOWAIT is not expected to fail; recovering
		 * here would leave the descriptor without a buffer,
		 * hence the panic (flagged XXX by the author).
		 */
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("epic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	EPIC_INIT_RXDESC(sc, idx);

	return (0);
}
1320
1321 /*
1322 * Set the EPIC multicast hash table.
1323 *
1324 * NOTE: We rely on a recently-updated mii_media_active here!
1325 */
1326 void
1327 epic_set_mchash(sc)
1328 struct epic_softc *sc;
1329 {
1330 struct ethercom *ec = &sc->sc_ethercom;
1331 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1332 struct ether_multi *enm;
1333 struct ether_multistep step;
1334 u_int32_t hash, mchash[4];
1335
1336 /*
1337 * Set up the multicast address filter by passing all multicast
1338 * addresses through a CRC generator, and then using the low-order
1339 * 6 bits as an index into the 64 bit multicast hash table (only
1340 * the lower 16 bits of each 32 bit multicast hash register are
1341 * valid). The high order bits select the register, while the
1342 * rest of the bits select the bit within the register.
1343 */
1344
1345 if (ifp->if_flags & IFF_PROMISC)
1346 goto allmulti;
1347
1348 if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_10_T) {
1349 /* XXX hardware bug in 10Mbps mode. */
1350 goto allmulti;
1351 }
1352
1353 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1354
1355 ETHER_FIRST_MULTI(step, ec, enm);
1356 while (enm != NULL) {
1357 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1358 /*
1359 * We must listen to a range of multicast addresses.
1360 * For now, just accept all multicasts, rather than
1361 * trying to set only those filter bits needed to match
1362 * the range. (At this time, the only use of address
1363 * ranges is for IP multicast routing, for which the
1364 * range is big enough to require all bits set.)
1365 */
1366 goto allmulti;
1367 }
1368
1369 hash = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);
1370 hash >>= 26;
1371
1372 /* Set the corresponding bit in the hash table. */
1373 mchash[hash >> 4] |= 1 << (hash & 0xf);
1374
1375 ETHER_NEXT_MULTI(step, enm);
1376 }
1377
1378 ifp->if_flags &= ~IFF_ALLMULTI;
1379 goto sethash;
1380
1381 allmulti:
1382 ifp->if_flags |= IFF_ALLMULTI;
1383 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1384
1385 sethash:
1386 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1387 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1388 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1389 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1390 }
1391
1392 /*
1393 * Wait for the MII to become ready.
1394 */
1395 int
1396 epic_mii_wait(sc, rw)
1397 struct epic_softc *sc;
1398 u_int32_t rw;
1399 {
1400 int i;
1401
1402 for (i = 0; i < 50; i++) {
1403 if ((bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL) & rw)
1404 == 0)
1405 break;
1406 delay(2);
1407 }
1408 if (i == 50) {
1409 printf("%s: MII timed out\n", sc->sc_dev.dv_xname);
1410 return (1);
1411 }
1412
1413 return (0);
1414 }
1415
1416 /*
1417 * Read from the MII.
1418 */
1419 int
1420 epic_mii_read(self, phy, reg)
1421 struct device *self;
1422 int phy, reg;
1423 {
1424 struct epic_softc *sc = (struct epic_softc *)self;
1425
1426 if (epic_mii_wait(sc, MMCTL_WRITE))
1427 return (0);
1428
1429 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1430 MMCTL_ARG(phy, reg, MMCTL_READ));
1431
1432 if (epic_mii_wait(sc, MMCTL_READ))
1433 return (0);
1434
1435 return (bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA) &
1436 MMDATA_MASK);
1437 }
1438
1439 /*
1440 * Write to the MII.
1441 */
1442 void
1443 epic_mii_write(self, phy, reg, val)
1444 struct device *self;
1445 int phy, reg, val;
1446 {
1447 struct epic_softc *sc = (struct epic_softc *)self;
1448
1449 if (epic_mii_wait(sc, MMCTL_WRITE))
1450 return;
1451
1452 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMDATA, val);
1453 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MMCTL,
1454 MMCTL_ARG(phy, reg, MMCTL_WRITE));
1455 }
1456
1457 /*
1458 * Callback from PHY when media changes.
1459 */
1460 void
1461 epic_statchg(self)
1462 struct device *self;
1463 {
1464 struct epic_softc *sc = (struct epic_softc *)self;
1465 u_int32_t txcon;
1466
1467 /*
1468 * Update loopback bits in TXCON to reflect duplex mode.
1469 */
1470 txcon = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_TXCON);
1471 if (sc->sc_mii.mii_media_active & IFM_FDX)
1472 txcon |= (TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1473 else
1474 txcon &= ~(TXCON_LOOPBACK_D1|TXCON_LOOPBACK_D2);
1475 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_TXCON, txcon);
1476
1477 /*
1478 * There is a multicast filter bug in 10Mbps mode. Kick the
1479 * multicast filter in case the speed changed.
1480 */
1481 epic_set_mchash(sc);
1482 }
1483
1484 /*
1485 * Callback from ifmedia to request current media status.
1486 */
1487 void
1488 epic_mediastatus(ifp, ifmr)
1489 struct ifnet *ifp;
1490 struct ifmediareq *ifmr;
1491 {
1492 struct epic_softc *sc = ifp->if_softc;
1493
1494 mii_pollstat(&sc->sc_mii);
1495 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1496 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1497 }
1498
1499 /*
1500 * Callback from ifmedia to request new media setting.
1501 */
1502 int
1503 epic_mediachange(ifp)
1504 struct ifnet *ifp;
1505 {
1506 struct epic_softc *sc = ifp->if_softc;
1507
1508 if (ifp->if_flags & IFF_UP)
1509 mii_mediachg(&sc->sc_mii);
1510 return (0);
1511 }
1512