/*	$NetBSD: smc83c170.c,v 1.3 1998/07/05 06:49:12 jonathan Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/mbuf.h>
52 #include <sys/malloc.h>
53 #include <sys/kernel.h>
54 #include <sys/socket.h>
55 #include <sys/ioctl.h>
56 #include <sys/errno.h>
57 #include <sys/device.h>
58
59 #include <net/if.h>
60 #include <net/if_dl.h>
61 #include <net/if_media.h>
62 #include <net/if_ether.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #endif
67
68 #ifdef INET
69 #include <netinet/in.h>
70 #include <netinet/if_inarp.h>
71 #endif
72
73 #ifdef NS
74 #include <netns/ns.h>
75 #include <netns/ns_if.h>
76 #endif
77
78 #include <machine/bus.h>
79 #include <machine/intr.h>
80
81 #include <dev/ic/smc83c170reg.h>
82 #include <dev/ic/smc83c170var.h>
83
84 void epic_start __P((struct ifnet *));
85 void epic_watchdog __P((struct ifnet *));
86 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
87
88 void epic_shutdown __P((void *));
89
90 void epic_reset __P((struct epic_softc *));
91 void epic_init __P((struct epic_softc *));
92 void epic_stop __P((struct epic_softc *));
93 int epic_add_rxbuf __P((struct epic_softc *, int));
94 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
95 void epic_set_mchash __P((struct epic_softc *));
96
97 /*
98 * Fudge the incoming packets by this much, to ensure the data after
99 * the Ethernet header is aligned.
100 */
101 #define RX_ALIGNMENT_FUDGE 2
102
103 /* XXX Should be somewhere else. */
104 #define ETHER_MIN_LEN 60
105
106 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
107 INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
108
109 /*
110 * Attach an EPIC interface to the system.
111 */
112 void
113 epic_attach(sc)
114 struct epic_softc *sc;
115 {
116 bus_space_tag_t st = sc->sc_st;
117 bus_space_handle_t sh = sc->sc_sh;
118 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
119 int i, rseg, error, attach_stage;
120 bus_dma_segment_t seg;
121 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
122 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
123
124 attach_stage = 0;
125
126 /*
127 * Allocate the control data structures, and create and load the
128 * DMA map for it.
129 */
130 if ((error = bus_dmamem_alloc(sc->sc_dmat,
131 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
132 BUS_DMA_NOWAIT)) != 0) {
133 printf("%s: unable to allocate control data, error = %d\n",
134 sc->sc_dev.dv_xname, error);
135 goto fail;
136 }
137
138 attach_stage = 1;
139
140 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
141 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
142 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
143 printf("%s: unable to map control data, error = %d\n",
144 sc->sc_dev.dv_xname, error);
145 goto fail;
146 }
147
148 attach_stage = 2;
149
150 if ((error = bus_dmamap_create(sc->sc_dmat,
151 sizeof(struct epic_control_data), 1,
152 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
153 &sc->sc_cddmamap)) != 0) {
154 printf("%s: unable to create control data DMA map, "
155 "error = %d\n", sc->sc_dev.dv_xname, error);
156 goto fail;
157 }
158
159 attach_stage = 3;
160
161 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
162 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
163 BUS_DMA_NOWAIT)) != 0) {
164 printf("%s: unable to load control data DMA map, error = %d\n",
165 sc->sc_dev.dv_xname, error);
166 goto fail;
167 }
168
169 attach_stage = 4;
170
171 /*
172 * Create the transmit buffer DMA maps.
173 */
174 for (i = 0; i < EPIC_NTXDESC; i++) {
175 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
176 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
177 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
178 printf("%s: unable to create tx DMA map %d, "
179 "error = %d\n", sc->sc_dev.dv_xname, i, error);
180 goto fail;
181 }
182 }
183
184 attach_stage = 5;
185
186 /*
187 * Create the recieve buffer DMA maps.
188 */
189 for (i = 0; i < EPIC_NRXDESC; i++) {
190 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
191 MCLBYTES, 0, BUS_DMA_NOWAIT,
192 &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
193 printf("%s: unable to create rx DMA map %d, "
194 "error = %d\n", sc->sc_dev.dv_xname, i, error);
195 goto fail;
196 }
197 }
198
199 attach_stage = 6;
200
201 /*
202 * Pre-allocate the receive buffers.
203 */
204 for (i = 0; i < EPIC_NRXDESC; i++) {
205 if ((error = epic_add_rxbuf(sc, i)) != 0) {
206 printf("%s: unable to allocate or map rx buffer %d\n,"
207 " error = %d\n", sc->sc_dev.dv_xname, i, error);
208 goto fail;
209 }
210 }
211
212 attach_stage = 7;
213
214 /*
215 * Bring the chip out of low-power mode and reset it to a known state.
216 */
217 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
218 epic_reset(sc);
219
220 /*
221 * Read the Ethernet address from the EEPROM.
222 */
223 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
224 bcopy(myea, enaddr, sizeof(myea));
225
226 /*
227 * ...and the device name.
228 */
229 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
230 mydevname);
231 bcopy(mydevname, devname, sizeof(mydevname));
232 devname[sizeof(mydevname)] = '\0';
233 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
234 if (devname[i] == ' ')
235 devname[i] = '\0';
236 else
237 break;
238 }
239
240 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
241 devname, ether_sprintf(enaddr));
242
243 ifp = &sc->sc_ethercom.ec_if;
244 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
245 ifp->if_softc = sc;
246 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
247 ifp->if_ioctl = epic_ioctl;
248 ifp->if_start = epic_start;
249 ifp->if_watchdog = epic_watchdog;
250
251 /*
252 * Attach the interface.
253 */
254 if_attach(ifp);
255 ether_ifattach(ifp, enaddr);
256 #if NBPFILTER > 0
257 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
258 sizeof(struct ether_header));
259 #endif
260
261 /*
262 * Make sure the interface is shutdown during reboot.
263 */
264 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
265 if (sc->sc_sdhook == NULL)
266 printf("%s: WARNING: unable to establish shutdown hook\n",
267 sc->sc_dev.dv_xname);
268 return;
269
270 fail:
271 /*
272 * Free any resources we've allocated during the failed attach
273 * attempt. Do this in reverse order and fall through.
274 */
275 switch (attach_stage) {
276 case 7:
277 for (i = 0; i < EPIC_NRXDESC; i++) {
278 if (sc->sc_rxsoft[i].ds_mbuf != NULL) {
279 bus_dmamap_unload(sc->sc_dmat,
280 sc->sc_rxsoft[i].ds_dmamap);
281 m_freem(sc->sc_rxsoft[i].ds_mbuf);
282 }
283 }
284 /* FALLTHROUGH */
285
286 case 6:
287 for (i = 0; i < EPIC_NRXDESC; i++)
288 bus_dmamap_destroy(sc->sc_dmat,
289 sc->sc_rxsoft[i].ds_dmamap);
290 /* FALLTHROUGH */
291
292 case 5:
293 for (i = 0; i < EPIC_NTXDESC; i++)
294 bus_dmamap_destroy(sc->sc_dmat,
295 sc->sc_txsoft[i].ds_dmamap);
296 /* FALLTHROUGH */
297
298 case 4:
299 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
300 /* FALLTHROUGH */
301
302 case 3:
303 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
304 /* FALLTHROUGH */
305
306 case 2:
307 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
308 sizeof(struct epic_control_data));
309 /* FALLTHROUGH */
310
311 case 1:
312 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
313 break;
314 }
315 }
316
317 /*
318 * Shutdown hook. Make sure the interface is stopped at reboot.
319 */
320 void
321 epic_shutdown(arg)
322 void *arg;
323 {
324 struct epic_softc *sc = arg;
325
326 epic_stop(sc);
327 }
328
329 /*
330 * Start packet transmission on the interface.
331 * [ifnet interface function]
332 */
333 void
334 epic_start(ifp)
335 struct ifnet *ifp;
336 {
337 struct epic_softc *sc = ifp->if_softc;
338 struct epic_txdesc *txd;
339 struct epic_descsoft *ds;
340 struct epic_fraglist *fr;
341 bus_dmamap_t dmamap;
342 struct mbuf *m0;
343 int nexttx, seg, error, txqueued;
344
345 txqueued = 0;
346
347 /*
348 * Loop through the send queue, setting up transmit descriptors
349 * until we drain the queue, or use up all available transmit
350 * descriptors.
351 */
352 while (ifp->if_snd.ifq_head != NULL &&
353 sc->sc_txpending < EPIC_NTXDESC) {
354 /*
355 * Grab a packet off the queue.
356 */
357 IF_DEQUEUE(&ifp->if_snd, m0);
358
359 /*
360 * Get the last and next available transmit descriptor.
361 */
362 nexttx = EPIC_NEXTTX(sc->sc_txlast);
363 txd = &sc->sc_control_data->ecd_txdescs[nexttx];
364 fr = &sc->sc_control_data->ecd_txfrags[nexttx];
365 ds = &sc->sc_txsoft[nexttx];
366 dmamap = ds->ds_dmamap;
367
368 loadmap:
369 /*
370 * Load the DMA map with the packet.
371 */
372 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
373 BUS_DMA_NOWAIT);
374 switch (error) {
375 case 0:
376 /* Success. */
377 break;
378
379 case EFBIG:
380 {
381 struct mbuf *mn;
382
383 /*
384 * We ran out of segments. We have to recopy this
385 * mbuf chain first. Bail out if we can't get the
386 * new buffers.
387 */
388 printf("%s: too many segments, ", sc->sc_dev.dv_xname);
389
390 MGETHDR(mn, M_DONTWAIT, MT_DATA);
391 if (mn == NULL) {
392 m_freem(m0);
393 printf("aborting\n");
394 goto out;
395 }
396 if (m0->m_pkthdr.len > MHLEN) {
397 MCLGET(mn, M_DONTWAIT);
398 if ((mn->m_flags & M_EXT) == 0) {
399 m_freem(mn);
400 m_freem(m0);
401 printf("aborting\n");
402 goto out;
403 }
404 }
405 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mn, caddr_t));
406 mn->m_pkthdr.len = mn->m_len = m0->m_pkthdr.len;
407 m_freem(m0);
408 m0 = mn;
409 printf("retrying\n");
410 goto loadmap;
411 }
412
413 default:
414 /*
415 * Some other problem; report it.
416 */
417 printf("%s: can't load mbuf chain, error = %d\n",
418 sc->sc_dev.dv_xname, error);
419 m_freem(m0);
420 goto out;
421 }
422
423 /*
424 * Initialize the fraglist.
425 */
426 fr->ef_nfrags = dmamap->dm_nsegs;
427 for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
428 fr->ef_frags[seg].ef_addr =
429 dmamap->dm_segs[seg].ds_addr;
430 fr->ef_frags[seg].ef_length =
431 dmamap->dm_segs[seg].ds_len;
432 }
433
434 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
435 BUS_DMASYNC_PREWRITE);
436
437 /*
438 * Store a pointer to the packet so we can free it later.
439 */
440 ds->ds_mbuf = m0;
441
442 /*
443 * Finish setting up the new transmit descriptor: set the
444 * packet length and give it to the EPIC.
445 */
446 txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
447 txd->et_txstatus = ET_TXSTAT_OWNER;
448
449 /*
450 * Committed; advance the lasttx pointer. If nothing was
451 * previously queued, reset the dirty pointer.
452 */
453 sc->sc_txlast = nexttx;
454 if (sc->sc_txpending == 0)
455 sc->sc_txdirty = nexttx;
456
457 sc->sc_txpending++;
458
459 txqueued = 1;
460
461 #if NBPFILTER > 0
462 /*
463 * Pass the packet to any BPF listeners.
464 */
465 if (ifp->if_bpf)
466 bpf_mtap(ifp->if_bpf, m0);
467 #endif
468 }
469
470 out:
471 /*
472 * We're finished. If we added more packets, make sure the
473 * transmit DMA engine is running.
474 */
475 if (txqueued) {
476 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
477 COMMAND_TXQUEUED);
478
479 /*
480 * Set a 5 second watchdog timer.
481 */
482 ifp->if_timer = 5;
483 }
484 }
485
486 /*
487 * Watchdog timer handler.
488 * [ifnet interface function]
489 */
490 void
491 epic_watchdog(ifp)
492 struct ifnet *ifp;
493 {
494 struct epic_softc *sc = ifp->if_softc;
495
496 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
497 ifp->if_oerrors++;
498
499 epic_init(sc);
500 }
501
502 /*
503 * Handle control requests from the operator.
504 * [ifnet interface function]
505 */
506 int
507 epic_ioctl(ifp, cmd, data)
508 struct ifnet *ifp;
509 u_long cmd;
510 caddr_t data;
511 {
512 struct epic_softc *sc = ifp->if_softc;
513 struct ifreq *ifr = (struct ifreq *)data;
514 struct ifaddr *ifa = (struct ifaddr *)data;
515 int s, error = 0;
516
517 s = splimp();
518
519 switch (cmd) {
520 case SIOCSIFADDR:
521 ifp->if_flags |= IFF_UP;
522
523 switch (ifa->ifa_addr->sa_family) {
524 #ifdef INET
525 case AF_INET:
526 epic_init(sc);
527 arp_ifinit(ifp, ifa);
528 break;
529 #endif /* INET */
530 #ifdef NS
531 case AF_NS:
532 {
533 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
534
535 if (ns_nullhost(*ina))
536 ina->x_host = *(union ns_host *)
537 LLADDR(ifp->if_sadl);
538 else
539 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
540 ifp->if_addrlen);
541 /* Set new address. */
542 epic_init(sc);
543 break;
544 }
545 #endif /* NS */
546 default:
547 epic_init(sc);
548 break;
549 }
550 break;
551
552 case SIOCSIFMTU:
553 if (ifr->ifr_mtu > ETHERMTU)
554 error = EINVAL;
555 else
556 ifp->if_mtu = ifr->ifr_mtu;
557 break;
558
559 case SIOCSIFFLAGS:
560 if ((ifp->if_flags & IFF_UP) == 0 &&
561 (ifp->if_flags & IFF_RUNNING) != 0) {
562 /*
563 * If interface is marked down and it is running, then
564 * stop it.
565 */
566 epic_stop(sc);
567 ifp->if_flags &= ~IFF_RUNNING;
568 } else if ((ifp->if_flags & IFF_UP) != 0 &&
569 (ifp->if_flags & IFF_RUNNING) == 0) {
570 /*
571 * If interfase it marked up and it is stopped, then
572 * start it.
573 */
574 epic_init(sc);
575 } else {
576 /*
577 * Reset the interface to pick up changes in any other
578 * flags that affect the hardware state.
579 */
580 epic_init(sc);
581 }
582 break;
583
584 case SIOCADDMULTI:
585 case SIOCDELMULTI:
586 error = (cmd == SIOCADDMULTI) ?
587 ether_addmulti(ifr, &sc->sc_ethercom) :
588 ether_delmulti(ifr, &sc->sc_ethercom);
589
590 if (error == ENETRESET) {
591 /*
592 * Multicast list has changed; set the hardware filter
593 * accordingly.
594 */
595 epic_init(sc);
596 error = 0;
597 }
598 break;
599
600 default:
601 error = EINVAL;
602 break;
603 }
604
605 splx(s);
606 return (error);
607 }
608
609 /*
610 * Interrupt handler.
611 */
612 int
613 epic_intr(arg)
614 void *arg;
615 {
616 struct epic_softc *sc = arg;
617 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
618 struct ether_header *eh;
619 struct epic_rxdesc *rxd;
620 struct epic_txdesc *txd;
621 struct epic_descsoft *ds;
622 struct mbuf *m;
623 u_int32_t intstat;
624 int i, len, claimed = 0, error;
625
626 top:
627 /*
628 * Get the interrupt status from the EPIC.
629 */
630 intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
631 if ((intstat & INTSTAT_INT_ACTV) == 0)
632 return (claimed);
633
634 claimed = 1;
635
636 /*
637 * Acknowledge the interrupt.
638 */
639 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
640 intstat & INTMASK);
641
642 /*
643 * Check for receive interrupts.
644 */
645 if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
646 for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
647 rxd = &sc->sc_control_data->ecd_rxdescs[i];
648 ds = &sc->sc_rxsoft[i];
649 m = ds->ds_mbuf;
650 error = 0;
651
652 if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
653 /*
654 * We have processed all of the
655 * receive buffers.
656 */
657 break;
658 }
659
660 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
661 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
662
663 /*
664 * Make sure the packet arrived intact.
665 */
666 if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
667 #if 1
668 if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
669 printf("%s: CRC error\n",
670 sc->sc_dev.dv_xname);
671 if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
672 printf("%s: alignment error\n",
673 sc->sc_dev.dv_xname);
674 #endif
675 ifp->if_ierrors++;
676 error = 1;
677 }
678
679 /*
680 * Add a new buffer to the receive chain. If this
681 * fails, the old buffer is recycled.
682 */
683 if (epic_add_rxbuf(sc, i) == 0) {
684 /*
685 * We wanted to reset the buffer, but
686 * didn't want to pass it on up.
687 */
688 if (error) {
689 m_freem(m);
690 continue;
691 }
692
693 len = rxd->er_buflength;
694 if (len < sizeof(struct ether_header)) {
695 m_freem(m);
696 continue;
697 }
698
699 m->m_pkthdr.rcvif = ifp;
700 m->m_pkthdr.len = m->m_len = len;
701 eh = mtod(m, struct ether_header *);
702 #if NBPFILTER > 0
703 /*
704 * Pass this up to any BPF listeners.
705 */
706 if (ifp->if_bpf) {
707 bpf_mtap(ifp->if_bpf, m);
708
709 /*
710 * Only pass this up the stack
711 * if it's for us.
712 */
713 if ((ifp->if_flags & IFF_PROMISC) &&
714 bcmp(LLADDR(ifp->if_sadl),
715 eh->ether_dhost,
716 ETHER_ADDR_LEN) != 0 &&
717 (rxd->er_rxstatus &
718 (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST))
719 == 0) {
720 m_freem(m);
721 continue;
722 }
723 }
724 #endif /* NPBFILTER > 0 */
725 m->m_data += sizeof(struct ether_header);
726 m->m_len -= sizeof(struct ether_header);
727 m->m_pkthdr.len = m->m_len;
728 ether_input(ifp, eh, m);
729 }
730 }
731
732 /*
733 * Update the recieve pointer.
734 */
735 sc->sc_rxptr = i;
736
737 /*
738 * Check for receive queue underflow.
739 */
740 if (intstat & INTSTAT_RQE) {
741 printf("%s: receiver queue empty\n",
742 sc->sc_dev.dv_xname);
743 /*
744 * Ring is already built; just restart the
745 * receiver.
746 */
747 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
748 sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
749 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
750 COMMAND_RXQUEUED | COMMAND_START_RX);
751 }
752 }
753
754 /*
755 * Check for transmission complete interrupts.
756 */
757 if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
758 for (i = sc->sc_txdirty;; i = EPIC_NEXTTX(i)) {
759 txd = &sc->sc_control_data->ecd_txdescs[i];
760 ds = &sc->sc_txsoft[i];
761
762 if (sc->sc_txpending == 0 ||
763 (txd->et_txstatus & ET_TXSTAT_OWNER) != 0)
764 break;
765
766 if (ds->ds_mbuf != NULL) {
767 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
768 0, ds->ds_dmamap->dm_mapsize,
769 BUS_DMASYNC_POSTWRITE);
770 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
771 m_freem(ds->ds_mbuf);
772 ds->ds_mbuf = NULL;
773 }
774 sc->sc_txpending--;
775
776 /*
777 * Check for errors and collisions.
778 */
779 if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
780 ifp->if_oerrors++;
781 ifp->if_collisions +=
782 TXSTAT_COLLISIONS(txd->et_txstatus);
783 if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST) {
784 #if 1
785 printf("%s: lost carrier\n",
786 sc->sc_dev.dv_xname);
787 #endif
788 /* XXX clear "active" but in media data */
789 }
790 }
791
792 /*
793 * Update the dirty transmit buffer pointer.
794 */
795 sc->sc_txdirty = i;
796
797 /*
798 * Cancel the watchdog timer if there are no pending
799 * transmissions.
800 */
801 if (sc->sc_txpending == 0)
802 ifp->if_timer = 0;
803
804 /*
805 * Kick the transmitter after a DMA underrun.
806 */
807 if (intstat & INTSTAT_TXU) {
808 printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
809 bus_space_write_4(sc->sc_st, sc->sc_sh,
810 EPIC_COMMAND, COMMAND_TXUGO);
811 if (sc->sc_txpending)
812 bus_space_write_4(sc->sc_st, sc->sc_sh,
813 EPIC_COMMAND, COMMAND_TXQUEUED);
814 }
815
816 /*
817 * Try to get more packets going.
818 */
819 epic_start(ifp);
820 }
821
822 /*
823 * Check for fatal interrupts.
824 */
825 if (intstat & INTSTAT_FATAL_INT) {
826 printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
827 epic_init(sc);
828 }
829
830 /*
831 * Check for more interrupts.
832 */
833 goto top;
834 }
835
836 /*
837 * Perform a soft reset on the EPIC.
838 */
839 void
840 epic_reset(sc)
841 struct epic_softc *sc;
842 {
843
844 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
845 delay(100);
846 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
847 delay(100);
848 }
849
850 /*
851 * Initialize the interface. Must be called at splimp().
852 */
853 void
854 epic_init(sc)
855 struct epic_softc *sc;
856 {
857 bus_space_tag_t st = sc->sc_st;
858 bus_space_handle_t sh = sc->sc_sh;
859 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
860 u_int8_t *enaddr = LLADDR(ifp->if_sadl);
861 struct epic_txdesc *txd;
862 struct epic_rxdesc *rxd;
863 u_int32_t genctl, reg0;
864 int i;
865
866 /*
867 * Cancel any pending I/O.
868 */
869 epic_stop(sc);
870
871 /*
872 * Reset the EPIC to a known state.
873 */
874 epic_reset(sc);
875
876 /*
877 * Magical mystery initialization.
878 */
879 bus_space_write_4(st, sh, EPIC_TEST, TEST_INIT);
880 bus_space_write_4(st, sh, EPIC_TXTEST, 0);
881
882 /*
883 * Initialize the EPIC genctl register:
884 *
885 * - 64 byte receive FIFO threshold
886 * - automatic advance to next receive frame
887 */
888 genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
889 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
890
891 /*
892 * Reset the MII bus and PHY.
893 */
894 reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
895 bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
896 bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
897 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
898 delay(100);
899 bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
900 delay(100);
901 bus_space_write_4(st, sh, EPIC_NVCTL, reg0);
902
903 /*
904 * Initialize Ethernet address.
905 */
906 reg0 = enaddr[1] << 8 | enaddr[0];
907 bus_space_write_4(st, sh, EPIC_LAN0, reg0);
908 reg0 = enaddr[3] << 8 | enaddr[2];
909 bus_space_write_4(st, sh, EPIC_LAN1, reg0);
910 reg0 = enaddr[5] << 8 | enaddr[4];
911 bus_space_write_4(st, sh, EPIC_LAN2, reg0);
912
913 /*
914 * Set up the multicast hash table.
915 */
916 epic_set_mchash(sc);
917
918 /*
919 * Initialize receive control. Remember the external buffer
920 * size setting.
921 */
922 reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
923 (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
924 reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
925 if (ifp->if_flags & IFF_PROMISC)
926 reg0 |= RXCON_PROMISCMODE;
927 bus_space_write_4(st, sh, EPIC_RXCON, reg0);
928
929 /*
930 * XXX Media (full-duplex in TXCON).
931 */
932
933 /*
934 * Initialize the transmit descriptors.
935 */
936 txd = sc->sc_control_data->ecd_txdescs;
937 bzero(txd, sizeof(sc->sc_control_data->ecd_txdescs));
938 for (i = 0; i < EPIC_NTXDESC; i++) {
939 txd[i].et_control = ET_TXCTL_LASTDESC | ET_TXCTL_IAF |
940 ET_TXCTL_FRAGLIST;
941 txd[i].et_bufaddr = sc->sc_cddma + EPIC_CDOFF(ecd_txfrags[i]);
942 txd[i].et_nextdesc = sc->sc_cddma +
943 EPIC_CDOFF(ecd_txdescs[(i + 1) & EPIC_NTXDESC_MASK]);
944 }
945
946 /*
947 * Initialize the receive descriptors. Note the buffers
948 * and control word have already been initialized; we only
949 * need to initialize the ring.
950 */
951 rxd = sc->sc_control_data->ecd_rxdescs;
952 for (i = 0; i < EPIC_NRXDESC; i++) {
953 rxd[i].er_nextdesc = sc->sc_cddma +
954 EPIC_CDOFF(ecd_rxdescs[(i + 1) & EPIC_NRXDESC_MASK]);
955 }
956
957 /*
958 * Initialize the interrupt mask and enable interrupts.
959 */
960 bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
961 bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);
962
963 /*
964 * Give the transmit and receive rings to the EPIC.
965 */
966 bus_space_write_4(st, sh, EPIC_PTCDAR,
967 sc->sc_cddma + EPIC_CDOFF(ecd_txdescs[0]));
968 bus_space_write_4(st, sh, EPIC_PRCDAR,
969 sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
970
971 /*
972 * Initialize our ring pointers. txlast it initialized to
973 * the end of the list so that it will wrap around to the
974 * first descriptor when the first packet is transmitted.
975 */
976 sc->sc_txpending = 0;
977 sc->sc_txdirty = 0;
978 sc->sc_txlast = EPIC_NTXDESC - 1;
979
980 sc->sc_rxptr = 0;
981
982 /*
983 * Set the EPIC in motion.
984 */
985 bus_space_write_4(st, sh, EPIC_COMMAND,
986 COMMAND_RXQUEUED | COMMAND_START_RX);
987
988 /*
989 * ...all done!
990 */
991 ifp->if_flags |= IFF_RUNNING;
992 ifp->if_flags &= ~IFF_OACTIVE;
993 }
994
995 /*
996 * Stop transmission on the interface.
997 */
998 void
999 epic_stop(sc)
1000 struct epic_softc *sc;
1001 {
1002 bus_space_tag_t st = sc->sc_st;
1003 bus_space_handle_t sh = sc->sc_sh;
1004 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1005 struct epic_descsoft *ds;
1006 u_int32_t reg;
1007 int i;
1008
1009 /*
1010 * Disable interrupts.
1011 */
1012 reg = bus_space_read_4(st, sh, EPIC_GENCTL);
1013 bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
1014 bus_space_write_4(st, sh, EPIC_INTMASK, 0);
1015
1016 /*
1017 * Stop the DMA engine and take the receiver off-line.
1018 */
1019 bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
1020 COMMAND_STOP_TDMA | COMMAND_STOP_RX);
1021
1022 /*
1023 * Release any queued transmit buffers.
1024 */
1025 for (i = 0; i < EPIC_NTXDESC; i++) {
1026 ds = &sc->sc_txsoft[i];
1027 if (ds->ds_mbuf != NULL) {
1028 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1029 m_freem(ds->ds_mbuf);
1030 ds->ds_mbuf = NULL;
1031 }
1032 }
1033 sc->sc_txpending = 0;
1034
1035 /*
1036 * Release the receive buffers, then reallocate/reinitialize.
1037 */
1038 for (i = 0; i < EPIC_NRXDESC; i++) {
1039 ds = &sc->sc_rxsoft[i];
1040 if (ds->ds_mbuf != NULL) {
1041 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1042 m_freem(ds->ds_mbuf);
1043 ds->ds_mbuf = NULL;
1044 }
1045 if (epic_add_rxbuf(sc, i) != 0) {
1046 /*
1047 * This "can't happen" - we're at splimp()
1048 * and we just freed the buffer we need
1049 * above.
1050 */
1051 panic("epic_stop: no buffers!");
1052 }
1053 }
1054
1055 /*
1056 * Mark the interface down and cancel the watchdog timer.
1057 */
1058 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1059 ifp->if_timer = 0;
1060 }
1061
1062 /*
1063 * Read the EPIC Serial EEPROM.
1064 */
1065 void
1066 epic_read_eeprom(sc, word, wordcnt, data)
1067 struct epic_softc *sc;
1068 int word, wordcnt;
1069 u_int16_t *data;
1070 {
1071 bus_space_tag_t st = sc->sc_st;
1072 bus_space_handle_t sh = sc->sc_sh;
1073 u_int16_t reg;
1074 int i, x;
1075
1076 #define EEPROM_WAIT_READY(st, sh) \
1077 while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
1078 /* nothing */
1079
1080 /*
1081 * Enable the EEPROM.
1082 */
1083 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1084 EEPROM_WAIT_READY(st, sh);
1085
1086 for (i = 0; i < wordcnt; i++) {
1087 /* Send CHIP SELECT for one clock tick. */
1088 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
1089 EEPROM_WAIT_READY(st, sh);
1090
1091 /* Shift in the READ opcode. */
1092 for (x = 3; x > 0; x--) {
1093 reg = EECTL_ENABLE|EECTL_EECS;
1094 if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
1095 reg |= EECTL_EEDI;
1096 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1097 EEPROM_WAIT_READY(st, sh);
1098 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1099 EEPROM_WAIT_READY(st, sh);
1100 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1101 EEPROM_WAIT_READY(st, sh);
1102 }
1103
1104 /* Shift in address. */
1105 for (x = 6; x > 0; x--) {
1106 reg = EECTL_ENABLE|EECTL_EECS;
1107 if ((word + i) & (1 << (x - 1)))
1108 reg |= EECTL_EEDI;
1109 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1110 EEPROM_WAIT_READY(st, sh);
1111 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1112 EEPROM_WAIT_READY(st, sh);
1113 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1114 EEPROM_WAIT_READY(st, sh);
1115 }
1116
1117 /* Shift out data. */
1118 reg = EECTL_ENABLE|EECTL_EECS;
1119 data[i] = 0;
1120 for (x = 16; x > 0; x--) {
1121 bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
1122 EEPROM_WAIT_READY(st, sh);
1123 if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
1124 data[i] |= (1 << (x - 1));
1125 bus_space_write_4(st, sh, EPIC_EECTL, reg);
1126 EEPROM_WAIT_READY(st, sh);
1127 }
1128
1129 /* Clear CHIP SELECT. */
1130 bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
1131 EEPROM_WAIT_READY(st, sh);
1132 }
1133
1134 /*
1135 * Disable the EEPROM.
1136 */
1137 bus_space_write_4(st, sh, EPIC_EECTL, 0);
1138
1139 #undef EEPROM_WAIT_READY
1140 }
1141
1142 /*
1143 * Add a receive buffer to the indicated descriptor.
1144 */
1145 int
1146 epic_add_rxbuf(sc, idx)
1147 struct epic_softc *sc;
1148 int idx;
1149 {
1150 struct epic_rxdesc *rxd = &sc->sc_control_data->ecd_rxdescs[idx];
1151 struct epic_descsoft *ds = &sc->sc_rxsoft[idx];
1152 struct mbuf *m, *oldm;
1153 int error = 0;
1154
1155 oldm = ds->ds_mbuf;
1156
1157 MGETHDR(m, M_DONTWAIT, MT_DATA);
1158 if (m != NULL) {
1159 MCLGET(m, M_DONTWAIT);
1160 if ((m->m_flags & M_EXT) == 0) {
1161 error = ENOMEM;
1162 m_freem(m);
1163 if (oldm == NULL)
1164 return (error);
1165 m = oldm;
1166 m->m_data = m->m_ext.ext_buf;
1167 }
1168 } else {
1169 error = ENOMEM;
1170 if (oldm == NULL)
1171 return (error);
1172 m = oldm;
1173 m->m_data = m->m_ext.ext_buf;
1174 }
1175
1176 ds->ds_mbuf = m;
1177
1178 /*
1179 * Set up the DMA map for this receive buffer.
1180 */
1181 if (m != oldm) {
1182 if (oldm != NULL)
1183 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1184 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1185 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
1186 if (error) {
1187 printf("%s: can't load rx buffer, error = %d\n",
1188 sc->sc_dev.dv_xname, error);
1189 panic("epic_add_rxbuf"); /* XXX */
1190 }
1191 }
1192
1193 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1194 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1195
1196 /*
1197 * Move the data pointer up so that the incoming packet
1198 * will be 32-bit aligned.
1199 */
1200 m->m_data += RX_ALIGNMENT_FUDGE;
1201
1202 /*
1203 * Initialize the receive descriptor.
1204 */
1205 rxd->er_bufaddr = ds->ds_dmamap->dm_segs[0].ds_addr +
1206 RX_ALIGNMENT_FUDGE;
1207 rxd->er_buflength = m->m_ext.ext_size - RX_ALIGNMENT_FUDGE;
1208 rxd->er_control = 0;
1209 rxd->er_rxstatus = ER_RXSTAT_OWNER;
1210
1211 return (error);
1212 }
1213
1214 /*
1215 * Set the EPIC multicast hash table.
1216 */
1217 void
1218 epic_set_mchash(sc)
1219 struct epic_softc *sc;
1220 {
1221 struct ethercom *ec = &sc->sc_ethercom;
1222 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1223 struct ether_multi *enm;
1224 struct ether_multistep step;
1225 u_int8_t *cp;
1226 u_int32_t crc, mchash[4];
1227 int len;
1228 static const u_int32_t crctab[] = {
1229 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1230 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1231 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1232 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1233 };
1234
1235 /*
1236 * Set up the multicast address filter by passing all multicast
1237 * addresses through a CRC generator, and then using the high-order
1238 * 6 bits as an index into the 64 bit multicast hash table (only
1239 * the lower 16 bits of each 32 bit multicast hash register are
1240 * valid). The high order bit selects the register, while the
1241 * rest of the bits select the bit within the register.
1242 */
1243
1244 if (ifp->if_flags & IFF_PROMISC)
1245 goto allmulti;
1246
1247 #if 1 /* XXX thorpej - hardware bug in 10Mb mode */
1248 goto allmulti;
1249 #endif
1250
1251 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;
1252
1253 ETHER_FIRST_MULTI(step, ec, enm);
1254 while (enm != NULL) {
1255 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1256 /*
1257 * We must listen to a range of multicast addresses.
1258 * For now, just accept all multicasts, rather than
1259 * trying to set only those filter bits needed to match
1260 * the range. (At this time, the only use of address
1261 * ranges is for IP multicast routing, for which the
1262 * range is big enough to require all bits set.)
1263 */
1264 goto allmulti;
1265 }
1266
1267 cp = enm->enm_addrlo;
1268 crc = 0xffffffff;
1269 for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
1270 crc ^= *cp++;
1271 crc = (crc >> 4) ^ crctab[crc & 0xf];
1272 crc = (crc >> 4) ^ crctab[crc & 0xf];
1273 }
1274 /* Just want the 6 most significant bits. */
1275 crc >>= 26;
1276
1277 /* Set the corresponding bit in the hash table. */
1278 mchash[crc >> 4] |= 1 << (crc & 0xf);
1279
1280 ETHER_NEXT_MULTI(step, enm);
1281 }
1282
1283 ifp->if_flags &= ~IFF_ALLMULTI;
1284 goto sethash;
1285
1286 allmulti:
1287 ifp->if_flags |= IFF_ALLMULTI;
1288 mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;
1289
1290 sethash:
1291 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
1292 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
1293 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
1294 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
1295 }
1296