/*	$NetBSD: smc83c170.c,v 1.5 1998/07/20 21:39:05 thorpej Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Standard Microsystems Corp. 83C170
 * Ethernet PCI Integrated Controller (EPIC/100).
 */
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/ic/smc83c170reg.h>
82 #include <dev/ic/smc83c170var.h>
83
/* ifnet interface functions. */
void	epic_start __P((struct ifnet *));
void	epic_watchdog __P((struct ifnet *));
int	epic_ioctl __P((struct ifnet *, u_long, caddr_t));

/* Shutdown hook. */
void	epic_shutdown __P((void *));

/* Internal driver helpers. */
void	epic_reset __P((struct epic_softc *));
void	epic_init __P((struct epic_softc *));
void	epic_stop __P((struct epic_softc *));
int	epic_add_rxbuf __P((struct epic_softc *, int));
void	epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
void	epic_set_mchash __P((struct epic_softc *));

/*
 * Fudge the incoming packets by this much, to ensure the data after
 * the Ethernet header is aligned.
 */
#define	RX_ALIGNMENT_FUDGE	2

/* XXX Should be somewhere else. */
#define	ETHER_MIN_LEN	60

/* Interrupt sources this driver handles; written to EPIC_INTMASK. */
#define	INTMASK	(INTSTAT_FATAL_INT | INTSTAT_TXU | \
	    INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
108
109 /*
110 * Attach an EPIC interface to the system.
111 */
112 void
113 epic_attach(sc)
114 struct epic_softc *sc;
115 {
116 bus_space_tag_t st = sc->sc_st;
117 bus_space_handle_t sh = sc->sc_sh;
118 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
119 int i, rseg, error, attach_stage;
120 bus_dma_segment_t seg;
121 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
122 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
123
124 attach_stage = 0;
125
126 /*
127 * Allocate the control data structures, and create and load the
128 * DMA map for it.
129 */
130 if ((error = bus_dmamem_alloc(sc->sc_dmat,
131 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
132 BUS_DMA_NOWAIT)) != 0) {
133 printf("%s: unable to allocate control data, error = %d\n",
134 sc->sc_dev.dv_xname, error);
135 goto fail;
136 }
137
138 attach_stage = 1;
139
140 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
141 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
142 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
143 printf("%s: unable to map control data, error = %d\n",
144 sc->sc_dev.dv_xname, error);
145 goto fail;
146 }
147
148 attach_stage = 2;
149
150 if ((error = bus_dmamap_create(sc->sc_dmat,
151 sizeof(struct epic_control_data), 1,
152 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
153 &sc->sc_cddmamap)) != 0) {
154 printf("%s: unable to create control data DMA map, "
155 "error = %d\n", sc->sc_dev.dv_xname, error);
156 goto fail;
157 }
158
159 attach_stage = 3;
160
161 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
162 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
163 BUS_DMA_NOWAIT)) != 0) {
164 printf("%s: unable to load control data DMA map, error = %d\n",
165 sc->sc_dev.dv_xname, error);
166 goto fail;
167 }
168
169 attach_stage = 4;
170
171 /*
172 * Create the transmit buffer DMA maps.
173 */
174 for (i = 0; i < EPIC_NTXDESC; i++) {
175 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
176 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
177 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
178 printf("%s: unable to create tx DMA map %d, "
179 "error = %d\n", sc->sc_dev.dv_xname, i, error);
180 goto fail;
181 }
182 }
183
184 attach_stage = 5;
185
186 /*
187 * Create the recieve buffer DMA maps.
188 */
189 for (i = 0; i < EPIC_NRXDESC; i++) {
190 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
191 MCLBYTES, 0, BUS_DMA_NOWAIT,
192 &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
193 printf("%s: unable to create rx DMA map %d, "
194 "error = %d\n", sc->sc_dev.dv_xname, i, error);
195 goto fail;
196 }
197 }
198
199 attach_stage = 6;
200
201 /*
202 * Pre-allocate the receive buffers.
203 */
204 for (i = 0; i < EPIC_NRXDESC; i++) {
205 if ((error = epic_add_rxbuf(sc, i)) != 0) {
206 printf("%s: unable to allocate or map rx buffer %d\n,"
207 " error = %d\n", sc->sc_dev.dv_xname, i, error);
208 goto fail;
209 }
210 }
211
212 attach_stage = 7;
213
214 /*
215 * Bring the chip out of low-power mode and reset it to a known state.
216 */
217 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
218 epic_reset(sc);
219
220 /*
221 * Read the Ethernet address from the EEPROM.
222 */
223 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
224 bcopy(myea, enaddr, sizeof(myea));
225
226 /*
227 * ...and the device name.
228 */
229 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
230 mydevname);
231 bcopy(mydevname, devname, sizeof(mydevname));
232 devname[sizeof(mydevname)] = '\0';
233 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
234 if (devname[i] == ' ')
235 devname[i] = '\0';
236 else
237 break;
238 }
239
240 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
241 devname, ether_sprintf(enaddr));
242
243 ifp = &sc->sc_ethercom.ec_if;
244 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
245 ifp->if_softc = sc;
246 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
247 ifp->if_ioctl = epic_ioctl;
248 ifp->if_start = epic_start;
249 ifp->if_watchdog = epic_watchdog;
250
251 /*
252 * Attach the interface.
253 */
254 if_attach(ifp);
255 ether_ifattach(ifp, enaddr);
256 #if NBPFILTER > 0
257 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
258 sizeof(struct ether_header));
259 #endif
260
261 /*
262 * Make sure the interface is shutdown during reboot.
263 */
264 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
265 if (sc->sc_sdhook == NULL)
266 printf("%s: WARNING: unable to establish shutdown hook\n",
267 sc->sc_dev.dv_xname);
268 return;
269
270 fail:
271 /*
272 * Free any resources we've allocated during the failed attach
273 * attempt. Do this in reverse order and fall through.
274 */
275 switch (attach_stage) {
276 case 7:
277 for (i = 0; i < EPIC_NRXDESC; i++) {
278 if (sc->sc_rxsoft[i].ds_mbuf != NULL) {
279 bus_dmamap_unload(sc->sc_dmat,
280 sc->sc_rxsoft[i].ds_dmamap);
281 m_freem(sc->sc_rxsoft[i].ds_mbuf);
282 }
283 }
284 /* FALLTHROUGH */
285
286 case 6:
287 for (i = 0; i < EPIC_NRXDESC; i++)
288 bus_dmamap_destroy(sc->sc_dmat,
289 sc->sc_rxsoft[i].ds_dmamap);
290 /* FALLTHROUGH */
291
292 case 5:
293 for (i = 0; i < EPIC_NTXDESC; i++)
294 bus_dmamap_destroy(sc->sc_dmat,
295 sc->sc_txsoft[i].ds_dmamap);
296 /* FALLTHROUGH */
297
298 case 4:
299 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
300 /* FALLTHROUGH */
301
302 case 3:
303 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
304 /* FALLTHROUGH */
305
306 case 2:
307 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
308 sizeof(struct epic_control_data));
309 /* FALLTHROUGH */
310
311 case 1:
312 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
313 break;
314 }
315 }
316
/*
 * Shutdown hook.  Make sure the interface is stopped at reboot.
 * The hook argument is the softc registered via shutdownhook_establish().
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg);
}
328
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Drains the interface send queue into the transmit descriptor ring
 * until either the queue is empty or all EPIC_NTXDESC descriptors are
 * pending, then kicks the transmit DMA engine and arms the watchdog.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	struct mbuf *m0;
	int nexttx, seg, error, txqueued;

	txqueued = 0;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (ifp->if_snd.ifq_head != NULL &&
	    sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_control_data->ecd_txdescs[nexttx];
		fr = &sc->sc_control_data->ecd_txfrags[nexttx];
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

 loadmap:
		/*
		 * Load the DMA map with the packet.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(m0);
				printf("aborting\n");
				goto out;
			}
			/* Need a cluster if the packet won't fit in MHLEN. */
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(m0);
					printf("aborting\n");
					goto out;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = mn;
			printf("retrying\n");
			goto loadmap;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			goto out;
		}

		/*
		 * Initialize the fraglist: one entry per DMA segment.
		 */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Finish setting up the new transmit descriptor: set the
		 * packet length (padded to the Ethernet minimum) and give
		 * it to the EPIC.
		 */
		txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
		txd->et_txstatus = ET_TXSTAT_OWNER;

		/*
		 * Committed; advance the lasttx pointer.  If nothing was
		 * previously queued, reset the dirty pointer.
		 */
		sc->sc_txlast = nexttx;
		if (sc->sc_txpending == 0)
			sc->sc_txdirty = nexttx;

		sc->sc_txpending++;

		txqueued = 1;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

 out:
	/*
	 * We're finished.  If we added more packets, make sure the
	 * transmit DMA engine is running.
	 */
	if (txqueued) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/*
		 * Set a 5 second watchdog timer.
		 */
		ifp->if_timer = 5;
	}
}
485
486 /*
487 * Watchdog timer handler.
488 * [ifnet interface function]
489 */
490 void
491 epic_watchdog(ifp)
492 struct ifnet *ifp;
493 {
494 struct epic_softc *sc = ifp->if_softc;
495
496 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
497 ifp->if_oerrors++;
498
499 epic_init(sc);
500 }
501
/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 *
 * Runs at splimp() for the duration of the request.  Returns 0 on
 * success or an errno value (EINVAL for unknown commands or an
 * over-size MTU).
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		/* Reject MTUs larger than standard Ethernet. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			epic_init(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.  epic_init() calls epic_set_mchash().
			 */
			epic_init(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
608
/*
 * Interrupt handler.
 *
 * Services receive-complete, receive-queue-empty, transmit-complete,
 * transmit-underrun, and fatal interrupts, looping until the EPIC's
 * INTSTAT register no longer reports an active interrupt.  Returns
 * non-zero if the interrupt was for us (for shared-interrupt dispatch).
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0, error;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt by writing the handled status
	 * bits back to INTSTAT.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
		/* Walk the ring from the last processed descriptor. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = &sc->sc_control_data->ecd_rxdescs[i];
			ds = &sc->sc_rxsoft[i];
			m = ds->ds_mbuf;
			error = 0;

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * Make sure the packet arrived intact.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
#if 1
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_ierrors++;
				error = 1;
			}

			/*
			 * Add a new buffer to the receive chain.  If this
			 * fails, the old buffer is recycled and we keep
			 * ownership of m, so it must not be passed up.
			 */
			if (epic_add_rxbuf(sc, i) == 0) {
				/*
				 * We wanted to reset the buffer, but
				 * didn't want to pass it on up (the
				 * packet was damaged).
				 */
				if (error) {
					m_freem(m);
					continue;
				}

				/* Runt frames are dropped here too. */
				len = rxd->er_buflength;
				if (len < sizeof(struct ether_header)) {
					m_freem(m);
					continue;
				}

				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len = len;
				eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners.
				 */
				if (ifp->if_bpf) {
					bpf_mtap(ifp->if_bpf, m);

					/*
					 * Only pass this up the stack
					 * if it's for us: in promiscuous
					 * mode, drop unicast frames not
					 * addressed to us that are neither
					 * broadcast nor multicast.
					 */
					if ((ifp->if_flags & IFF_PROMISC) &&
					    bcmp(LLADDR(ifp->if_sadl),
						 eh->ether_dhost,
						 ETHER_ADDR_LEN) != 0 &&
					    (rxd->er_rxstatus &
					     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST))
					     == 0) {
						m_freem(m);
						continue;
					}
				}
#endif /* NBPFILTER > 0 */
				/* Strip the Ethernet header for ether_input. */
				m->m_data += sizeof(struct ether_header);
				m->m_len -= sizeof(struct ether_header);
				m->m_pkthdr.len = m->m_len;
				ether_input(ifp, eh, m);
			}
		}

		/*
		 * Update the receive pointer.
		 */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		/* Reap completed descriptors starting at the dirty pointer. */
		for (i = sc->sc_txdirty;; i = EPIC_NEXTTX(i)) {
			txd = &sc->sc_control_data->ecd_txdescs[i];
			ds = &sc->sc_txsoft[i];

			if (sc->sc_txpending == 0 ||
			    (txd->et_txstatus & ET_TXSTAT_OWNER) != 0)
				break;

			if (ds->ds_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
				    0, ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
				m_freem(ds->ds_mbuf);
				ds->ds_mbuf = NULL;
			}
			sc->sc_txpending--;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST) {
#if 1
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
#endif
				/* XXX clear "active" but in media data */
			}
		}

		/*
		 * Update the dirty transmit buffer pointer.
		 */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
		epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
835
836 /*
837 * Perform a soft reset on the EPIC.
838 */
839 void
840 epic_reset(sc)
841 struct epic_softc *sc;
842 {
843
844 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
845 delay(100);
846 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
847 delay(100);
848 }
849
/*
 * Initialize the interface.  Must be called at splimp().
 *
 * Stops and resets the chip, applies the SMC clock-source workaround,
 * programs the station address and multicast filter, rebuilds the
 * transmit/receive descriptor rings, and starts the receiver.
 */
void
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_rxdesc *rxd;
	u_int32_t genctl, reg0;
	int i;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * According to SMC Application Note 7-15, the EPIC's clock
	 * source is incorrect following a reset.  This manifests itself
	 * as failure to recognize when host software has written to
	 * a register on the EPIC.  The appnote recommends issuing at
	 * least 16 consecutive writes to the CLOCK TEST bit to correctly
	 * configure the clock source.
	 */
	for (i = 0; i < 16; i++)
		bus_space_write_4(st, sh, EPIC_TEST, TEST_CLOCKTEST);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address; the LAN registers each take one
	 * 16-bit (little-endian) slice of the 6-byte address.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Set up the multicast hash table.
	 */
	epic_set_mchash(sc);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/*
	 * XXX Media (full-duplex in TXCON).
	 */

	/*
	 * Initialize the transmit descriptors: each descriptor points
	 * at its fraglist and at the next descriptor, forming a ring.
	 */
	txd = sc->sc_control_data->ecd_txdescs;
	bzero(txd, sizeof(sc->sc_control_data->ecd_txdescs));
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd[i].et_control = ET_TXCTL_LASTDESC | ET_TXCTL_IAF |
		    ET_TXCTL_FRAGLIST;
		txd[i].et_bufaddr = sc->sc_cddma + EPIC_CDOFF(ecd_txfrags[i]);
		txd[i].et_nextdesc = sc->sc_cddma +
		    EPIC_CDOFF(ecd_txdescs[(i + 1) & EPIC_NTXDESC_MASK]);
	}

	/*
	 * Initialize the receive descriptors.  Note the buffers
	 * and control word have already been initialized (by
	 * epic_add_rxbuf()); we only need to initialize the ring.
	 */
	rxd = sc->sc_control_data->ecd_rxdescs;
	for (i = 0; i < EPIC_NRXDESC; i++) {
		rxd[i].er_nextdesc = sc->sc_cddma +
		    EPIC_CDOFF(ecd_rxdescs[(i + 1) & EPIC_NRXDESC_MASK]);
	}

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    sc->sc_cddma + EPIC_CDOFF(ecd_txdescs[0]));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));

	/*
	 * Initialize our ring pointers.  txlast is initialized to
	 * the end of the list so that it will wrap around to the
	 * first descriptor when the first packet is transmitted.
	 */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	sc->sc_rxptr = 0;

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
1004
/*
 * Stop transmission on the interface.
 *
 * Disables interrupts, halts the DMA engines and receiver, frees any
 * queued transmit mbufs, and re-initializes the receive buffers so the
 * ring is ready for a subsequent epic_init().  Called at splimp().
 */
void
epic_stop(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	/*
	 * Release the receive buffers, then reallocate/reinitialize.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
		if (epic_add_rxbuf(sc, i) != 0) {
			/*
			 * This "can't happen" - we're at splimp()
			 * and we just freed the buffer we need
			 * above.
			 */
			panic("epic_stop: no buffers!");
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1071
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs the serial EEPROM protocol through the EECTL register:
 * for each of `wordcnt' words starting at word offset `word', shifts
 * out a 3-bit READ opcode and a 6-bit address (MSB first), then clocks
 * in 16 data bits.  Results are stored in `data'.  Busy-waits on the
 * EERDY bit between register accesses.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first. */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			/* Present the data bit, then pulse the clock. */
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in the 6-bit word address, MSB first. */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out the 16 data bits, MSB first. */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1151
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Tries to attach a fresh mbuf cluster to receive slot `idx'.  If
 * allocation fails and the slot already holds a buffer, the old buffer
 * is recycled (its DMA map is left loaded) and ENOMEM is returned so
 * the caller knows not to pass the old packet up the stack.  Returns 0
 * when a new buffer was installed.
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_rxdesc *rxd = &sc->sc_control_data->ecd_rxdescs[idx];
	struct epic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m, *oldm;
	int error = 0;

	oldm = ds->ds_mbuf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster; fall back to recycling the old mbuf. */
			error = ENOMEM;
			m_freem(m);
			if (oldm == NULL)
				return (error);
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		/* No mbuf at all; recycle the old one if we have it. */
		error = ENOMEM;
		if (oldm == NULL)
			return (error);
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	ds->ds_mbuf = m;

	/*
	 * Set up the DMA map for this receive buffer.  A recycled
	 * buffer's map is already loaded.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
		    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("epic_add_rxbuf");	/* XXX */
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Move the data pointer up so that the incoming packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RX_ALIGNMENT_FUDGE;

	/*
	 * Initialize the receive descriptor and hand it to the chip.
	 */
	rxd->er_bufaddr = ds->ds_dmamap->dm_segs[0].ds_addr +
	    RX_ALIGNMENT_FUDGE;
	rxd->er_buflength = m->m_ext.ext_size - RX_ALIGNMENT_FUDGE;
	rxd->er_control = 0;
	rxd->er_rxstatus = ER_RXSTAT_OWNER;

	return (error);
}
1223
/*
 * Set the EPIC multicast hash table.
 *
 * Hashes each multicast address with a nibble-at-a-time CRC-32 and
 * sets the corresponding bit in the 64-bit filter spread across the
 * four MC0..MC3 registers.  NOTE: the `#if 1' below currently forces
 * ALLMULTI unconditionally to work around a hardware bug in 10Mb mode,
 * so the per-address hash computation is presently unreachable.
 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[4];
	int len;
	/* Nibble-indexed CRC-32 (Ethernet polynomial) lookup table. */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bit selects the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

#if 1	/* XXX thorpej - hardware bug in 10Mb mode */
	goto allmulti;
#endif

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		/* CRC the address, two table lookups per byte. */
		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	/* Accept all multicast: set every valid bit in the filter. */
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
1306