/*	$NetBSD: smc83c170.c,v 1.1 1998/06/02 01:29:42 thorpej Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Device driver for the Standard Microsystems Corp. 83C170
42 * Ethernet PCI Integrated Controller (EPIC/100).
43 */
44
45 #include "bpfilter.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/mbuf.h>
50 #include <sys/malloc.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/ioctl.h>
54 #include <sys/errno.h>
55 #include <sys/device.h>
56
57 #include <net/if.h>
58 #include <net/if_dl.h>
59 #include <net/if_media.h>
60 #include <net/if_ether.h>
61
62 #if NBPFILTER > 0
63 #include <net/bpf.h>
64 #endif
65
66 #ifdef INET
67 #include <netinet/in.h>
68 #include <netinet/if_inarp.h>
69 #endif
70
71 #ifdef NS
72 #include <netns/ns.h>
73 #include <netns/ns_if.h>
74 #endif
75
76 #include <machine/bus.h>
77 #include <machine/intr.h>
78
79 #include <dev/ic/smc83c170reg.h>
80 #include <dev/ic/smc83c170var.h>
81
82 void epic_start __P((struct ifnet *));
83 void epic_watchdog __P((struct ifnet *));
84 int epic_ioctl __P((struct ifnet *, u_long, caddr_t));
85
86 void epic_shutdown __P((void *));
87
88 void epic_reset __P((struct epic_softc *));
89 void epic_init __P((struct epic_softc *));
90 void epic_stop __P((struct epic_softc *));
91 int epic_add_rxbuf __P((struct epic_softc *, int));
92 void epic_read_eeprom __P((struct epic_softc *, int, int, u_int16_t *));
93 void epic_set_mchash __P((struct epic_softc *));
94
95 /*
96 * Fudge the incoming packets by this much, to ensure the data after
97 * the Ethernet header is aligned.
98 */
99 #define RX_ALIGNMENT_FUDGE 2
100
101 /* XXX Should be somewhere else. */
102 #define ETHER_MIN_LEN 60
103
104 #define INTMASK (INTSTAT_FATAL_INT | INTSTAT_TXU | \
105 INTSTAT_TXC | INTSTAT_RQE | INTSTAT_RCC)
106
107 /*
108 * Attach an EPIC interface to the system.
109 */
110 void
111 epic_attach(sc)
112 struct epic_softc *sc;
113 {
114 bus_space_tag_t st = sc->sc_st;
115 bus_space_handle_t sh = sc->sc_sh;
116 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
117 int i, rseg, error, attach_stage;
118 bus_dma_segment_t seg;
119 u_int8_t enaddr[ETHER_ADDR_LEN], devname[12 + 1];
120 u_int16_t myea[ETHER_ADDR_LEN / 2], mydevname[6];
121
122 attach_stage = 0;
123
124 /*
125 * Allocate the control data structures, and create and load the
126 * DMA map for it.
127 */
128 if ((error = bus_dmamem_alloc(sc->sc_dmat,
129 sizeof(struct epic_control_data), NBPG, 0, &seg, 1, &rseg,
130 BUS_DMA_NOWAIT)) != 0) {
131 printf("%s: unable to allocate control data, error = %d\n",
132 sc->sc_dev.dv_xname, error);
133 goto fail;
134 }
135
136 attach_stage = 1;
137
138 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
139 sizeof(struct epic_control_data), (caddr_t *)&sc->sc_control_data,
140 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
141 printf("%s: unable to map control data, error = %d\n",
142 sc->sc_dev.dv_xname, error);
143 goto fail;
144 }
145
146 attach_stage = 2;
147
148 if ((error = bus_dmamap_create(sc->sc_dmat,
149 sizeof(struct epic_control_data), 1,
150 sizeof(struct epic_control_data), 0, BUS_DMA_NOWAIT,
151 &sc->sc_cddmamap)) != 0) {
152 printf("%s: unable to create control data DMA map, "
153 "error = %d\n", sc->sc_dev.dv_xname, error);
154 goto fail;
155 }
156
157 attach_stage = 3;
158
159 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
160 sc->sc_control_data, sizeof(struct epic_control_data), NULL,
161 BUS_DMA_NOWAIT)) != 0) {
162 printf("%s: unable to load control data DMA map, error = %d\n",
163 sc->sc_dev.dv_xname, error);
164 goto fail;
165 }
166
167 attach_stage = 4;
168
169 /*
170 * Create the transmit buffer DMA maps.
171 */
172 for (i = 0; i < EPIC_NTXDESC; i++) {
173 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
174 EPIC_NFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
175 &sc->sc_txsoft[i].ds_dmamap)) != 0) {
176 printf("%s: unable to create tx DMA map %d, "
177 "error = %d\n", sc->sc_dev.dv_xname, i, error);
178 goto fail;
179 }
180 }
181
182 attach_stage = 5;
183
184 /*
185 * Create the recieve buffer DMA maps.
186 */
187 for (i = 0; i < EPIC_NRXDESC; i++) {
188 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
189 MCLBYTES, 0, BUS_DMA_NOWAIT,
190 &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
191 printf("%s: unable to create rx DMA map %d, "
192 "error = %d\n", sc->sc_dev.dv_xname, i, error);
193 goto fail;
194 }
195 }
196
197 attach_stage = 6;
198
199 /*
200 * Pre-allocate the receive buffers.
201 */
202 for (i = 0; i < EPIC_NRXDESC; i++) {
203 if ((error = epic_add_rxbuf(sc, i)) != 0) {
204 printf("%s: unable to allocate or map rx buffer %d\n,"
205 " error = %d\n", sc->sc_dev.dv_xname, i, error);
206 goto fail;
207 }
208 }
209
210 attach_stage = 7;
211
212 /*
213 * Bring the chip out of low-power mode and reset it to a known state.
214 */
215 bus_space_write_4(st, sh, EPIC_GENCTL, 0);
216 epic_reset(sc);
217
218 /*
219 * Read the Ethernet address from the EEPROM.
220 */
221 epic_read_eeprom(sc, 0, (sizeof(myea) / sizeof(myea[0])), myea);
222 bcopy(myea, enaddr, sizeof(myea));
223
224 /*
225 * ...and the device name.
226 */
227 epic_read_eeprom(sc, 0x2c, (sizeof(mydevname) / sizeof(mydevname[0])),
228 mydevname);
229 bcopy(mydevname, devname, sizeof(mydevname));
230 devname[sizeof(mydevname)] = '\0';
231 for (i = sizeof(mydevname) - 1; i >= 0; i--) {
232 if (devname[i] == ' ')
233 devname[i] = '\0';
234 else
235 break;
236 }
237
238 printf("%s: %s, Ethernet address %s\n", sc->sc_dev.dv_xname,
239 devname, ether_sprintf(enaddr));
240
241 ifp = &sc->sc_ethercom.ec_if;
242 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
243 ifp->if_softc = sc;
244 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
245 ifp->if_ioctl = epic_ioctl;
246 ifp->if_start = epic_start;
247 ifp->if_watchdog = epic_watchdog;
248
249 /*
250 * Attach the interface.
251 */
252 if_attach(ifp);
253 ether_ifattach(ifp, enaddr);
254 #if NBPFILTER > 0
255 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
256 sizeof(struct ether_header));
257 #endif
258
259 /*
260 * Make sure the interface is shutdown during reboot.
261 */
262 sc->sc_sdhook = shutdownhook_establish(epic_shutdown, sc);
263 if (sc->sc_sdhook == NULL)
264 printf("%s: WARNING: unable to establish shutdown hook\n",
265 sc->sc_dev.dv_xname);
266 return;
267
268 fail:
269 /*
270 * Free any resources we've allocated during the failed attach
271 * attempt. Do this in reverse order and fall through.
272 */
273 switch (attach_stage) {
274 case 7:
275 for (i = 0; i < EPIC_NRXDESC; i++) {
276 if (sc->sc_rxsoft[i].ds_mbuf != NULL) {
277 bus_dmamap_unload(sc->sc_dmat,
278 sc->sc_rxsoft[i].ds_dmamap);
279 m_freem(sc->sc_rxsoft[i].ds_mbuf);
280 }
281 }
282 /* FALLTHROUGH */
283
284 case 6:
285 for (i = 0; i < EPIC_NRXDESC; i++)
286 bus_dmamap_destroy(sc->sc_dmat,
287 sc->sc_rxsoft[i].ds_dmamap);
288 /* FALLTHROUGH */
289
290 case 5:
291 for (i = 0; i < EPIC_NTXDESC; i++)
292 bus_dmamap_destroy(sc->sc_dmat,
293 sc->sc_txsoft[i].ds_dmamap);
294 /* FALLTHROUGH */
295
296 case 4:
297 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
298 /* FALLTHROUGH */
299
300 case 3:
301 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
302 /* FALLTHROUGH */
303
304 case 2:
305 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
306 sizeof(struct epic_control_data));
307 /* FALLTHROUGH */
308
309 case 1:
310 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
311 break;
312 }
313 }
314
315 /*
316 * Shutdown hook. Make sure the interface is stopped at reboot.
317 */
/*
 * Shutdown hook.  Stops the interface so the chip is quiescent at reboot.
 */
void
epic_shutdown(arg)
	void *arg;
{

	epic_stop((struct epic_softc *)arg);
}
326
327 /*
328 * Start packet transmission on the interface.
329 * [ifnet interface function]
330 */
/*
 * Start packet transmission on the interface.
 * [ifnet interface function]
 *
 * Drains the interface send queue into the transmit descriptor ring,
 * building a fragment list per packet, then kicks the transmit DMA
 * engine once if anything was queued.
 */
void
epic_start(ifp)
	struct ifnet *ifp;
{
	struct epic_softc *sc = ifp->if_softc;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct epic_fraglist *fr;
	bus_dmamap_t dmamap;
	struct mbuf *m0;
	int nexttx, seg, error, txqueued;

	/* Set once we commit at least one descriptor to the chip. */
	txqueued = 0;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (ifp->if_snd.ifq_head != NULL &&
	    sc->sc_txpending < EPIC_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IF_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * Get the last and next available transmit descriptor.
		 */
		nexttx = EPIC_NEXTTX(sc->sc_txlast);
		txd = &sc->sc_control_data->ecd_txdescs[nexttx];
		fr = &sc->sc_control_data->ecd_txfrags[nexttx];
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

 loadmap:
		/*
		 * Load the DMA map with the packet.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 *
			 * NOTE(review): the compaction copies into a single
			 * cluster; assumes m0->m_pkthdr.len <= MCLBYTES —
			 * TODO confirm for oversized chains.
			 */
			printf("%s: too many segments, ", sc->sc_dev.dv_xname);

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(m0);
				printf("aborting\n");
				goto out;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(m0);
					printf("aborting\n");
					goto out;
				}
			}
			/* Flatten the chain into the new single mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = m0->m_pkthdr.len;
			m_freem(m0);
			m0 = mn;
			printf("retrying\n");
			goto loadmap;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			printf("%s: can't load mbuf chain, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			m_freem(m0);
			goto out;
		}

		/*
		 * Initialize the fraglist: one entry per DMA segment.
		 */
		fr->ef_nfrags = dmamap->dm_nsegs;
		for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
			fr->ef_frags[seg].ef_addr =
			    dmamap->dm_segs[seg].ds_addr;
			fr->ef_frags[seg].ef_length =
			    dmamap->dm_segs[seg].ds_len;
		}

		/* Make the packet data visible to the device. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Finish setting up the new transmit descriptor: set the
		 * packet length (padded to the Ethernet minimum) and give
		 * it to the EPIC by setting the OWNER bit.
		 */
		txd->et_txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
		txd->et_txstatus = ET_TXSTAT_OWNER;

		/*
		 * Committed; advance the lasttx pointer.  If nothing was
		 * previously queued, reset the dirty pointer.
		 */
		sc->sc_txlast = nexttx;
		if (sc->sc_txpending == 0)
			sc->sc_txdirty = nexttx;

		sc->sc_txpending++;

		txqueued = 1;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

 out:
	/*
	 * We're finished.  If we added more packets, make sure the
	 * transmit DMA engine is running.
	 */
	if (txqueued) {
		bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
		    COMMAND_TXQUEUED);

		/*
		 * Set a 5 second watchdog timer.
		 */
		ifp->if_timer = 5;
	}
}
483
484 /*
485 * Watchdog timer handler.
486 * [ifnet interface function]
487 */
488 void
489 epic_watchdog(ifp)
490 struct ifnet *ifp;
491 {
492 struct epic_softc *sc = ifp->if_softc;
493
494 printf("%s: device timeout\n", sc->sc_dev.dv_xname);
495 ifp->if_oerrors++;
496
497 epic_init(sc);
498 }
499
500 /*
501 * Handle control requests from the operator.
502 * [ifnet interface function]
503 */
/*
 * Handle control requests from the operator.
 * [ifnet interface function]
 *
 * Runs at splimp() to keep the interrupt handler out while the
 * interface state is being changed.  Returns 0 or an errno.
 */
int
epic_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct epic_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			epic_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host = *(union ns_host *)
				    LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    ifp->if_addrlen);
			/* Set new address. */
			epic_init(sc);
			break;
		    }
#endif /* NS */
		default:
			epic_init(sc);
			break;
		}
		break;

	case SIOCSIFMTU:
		/* Reject anything larger than a standard Ethernet MTU. */
		if (ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			epic_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			epic_init(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect the hardware state.
			 */
			epic_init(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			epic_init(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
606
607 /*
608 * Interrupt handler.
609 */
/*
 * Interrupt handler.
 *
 * Loops reading INTSTAT until no interrupt is pending, servicing
 * receive completions, transmit completions, queue-empty/underrun
 * conditions, and fatal errors.  Returns nonzero if any interrupt
 * was claimed.
 */
int
epic_intr(arg)
	void *arg;
{
	struct epic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_header *eh;
	struct epic_rxdesc *rxd;
	struct epic_txdesc *txd;
	struct epic_descsoft *ds;
	struct mbuf *m;
	u_int32_t intstat;
	int i, len, claimed = 0, error;

 top:
	/*
	 * Get the interrupt status from the EPIC.
	 */
	intstat = bus_space_read_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT);
	if ((intstat & INTSTAT_INT_ACTV) == 0)
		return (claimed);

	claimed = 1;

	/*
	 * Acknowledge the interrupt by writing back the bits we handle.
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_INTSTAT,
	    intstat & INTMASK);

	/*
	 * Check for receive interrupts.
	 */
	if (intstat & (INTSTAT_RCC | INTSTAT_RQE)) {
		/* Walk the ring from where we left off last time. */
		for (i = sc->sc_rxptr;; i = EPIC_NEXTRX(i)) {
			rxd = &sc->sc_control_data->ecd_rxdescs[i];
			ds = &sc->sc_rxsoft[i];
			m = ds->ds_mbuf;
			error = 0;

			if (rxd->er_rxstatus & ER_RXSTAT_OWNER) {
				/*
				 * We have processed all of the
				 * receive buffers.
				 */
				break;
			}

			/* Make the received data visible to the CPU. */
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/*
			 * Make sure the packet arrived intact.
			 */
			if ((rxd->er_rxstatus & ER_RXSTAT_PKTINTACT) == 0) {
#if 1
				if (rxd->er_rxstatus & ER_RXSTAT_CRCERROR)
					printf("%s: CRC error\n",
					    sc->sc_dev.dv_xname);
				if (rxd->er_rxstatus & ER_RXSTAT_ALIGNERROR)
					printf("%s: alignment error\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_ierrors++;
				/* Still recycle the buffer below. */
				error = 1;
			}

			/*
			 * Add a new buffer to the receive chain.  If this
			 * fails, the old buffer is recycled.
			 */
			if (epic_add_rxbuf(sc, i) == 0) {
				/*
				 * We wanted to reset the buffer, but
				 * didn't want to pass it on up.
				 */
				if (error) {
					m_freem(m);
					continue;
				}

				len = rxd->er_buflength;
				if (len < sizeof(struct ether_header)) {
					/* Runt; drop it. */
					m_freem(m);
					continue;
				}

				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len = len;
				eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners.
				 */
				if (ifp->if_bpf) {
					bpf_mtap(ifp->if_bpf, m);

					/*
					 * Only pass this up the stack
					 * if it's for us.
					 */
					if ((ifp->if_flags & IFF_PROMISC) &&
					    bcmp(LLADDR(ifp->if_sadl),
						 eh->ether_dhost,
						 ETHER_ADDR_LEN) != 0 &&
					    (rxd->er_rxstatus &
					     (ER_RXSTAT_BCAST|ER_RXSTAT_MCAST))
					     == 0) {
						m_freem(m);
						continue;
					}
				}
#endif /* NBPFILTER > 0 */
				/* Strip the header before handing it up. */
				m->m_data += sizeof(struct ether_header);
				m->m_len -= sizeof(struct ether_header);
				m->m_pkthdr.len = m->m_len;
				ether_input(ifp, eh, m);
			}
		}

		/*
		 * Update the receive pointer.
		 */
		sc->sc_rxptr = i;

		/*
		 * Check for receive queue underflow.
		 */
		if (intstat & INTSTAT_RQE) {
			printf("%s: receiver queue empty\n",
			    sc->sc_dev.dv_xname);
			/*
			 * Ring is already built; just restart the
			 * receiver.
			 */
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_PRCDAR,
			    sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));
			bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_COMMAND,
			    COMMAND_RXQUEUED | COMMAND_START_RX);
		}
	}

	/*
	 * Check for transmission complete interrupts.
	 */
	if (intstat & (INTSTAT_TXC | INTSTAT_TXU)) {
		/* Reap completed descriptors from the dirty pointer on. */
		for (i = sc->sc_txdirty;; i = EPIC_NEXTTX(i)) {
			txd = &sc->sc_control_data->ecd_txdescs[i];
			ds = &sc->sc_txsoft[i];

			if (sc->sc_txpending == 0 ||
			    (txd->et_txstatus & ET_TXSTAT_OWNER) != 0)
				break;

			if (ds->ds_mbuf != NULL) {
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap,
				    0, ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
				m_freem(ds->ds_mbuf);
				ds->ds_mbuf = NULL;
			}
			sc->sc_txpending--;

			/*
			 * Check for errors and collisions.
			 */
			if ((txd->et_txstatus & ET_TXSTAT_PACKETTX) == 0)
				ifp->if_oerrors++;
			ifp->if_collisions +=
			    TXSTAT_COLLISIONS(txd->et_txstatus);
			if (txd->et_txstatus & ET_TXSTAT_CARSENSELOST) {
#if 1
				printf("%s: lost carrier\n",
				    sc->sc_dev.dv_xname);
#endif
				/* XXX clear "active" but in media data */
			}
		}

		/*
		 * Update the dirty transmit buffer pointer.
		 */
		sc->sc_txdirty = i;

		/*
		 * Cancel the watchdog timer if there are no pending
		 * transmissions.
		 */
		if (sc->sc_txpending == 0)
			ifp->if_timer = 0;

		/*
		 * Kick the transmitter after a DMA underrun.
		 */
		if (intstat & INTSTAT_TXU) {
			printf("%s: transmit underrun\n", sc->sc_dev.dv_xname);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    EPIC_COMMAND, COMMAND_TXUGO);
			if (sc->sc_txpending)
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    EPIC_COMMAND, COMMAND_TXQUEUED);
		}

		/*
		 * Try to get more packets going.
		 */
		epic_start(ifp);
	}

	/*
	 * Check for fatal interrupts.
	 */
	if (intstat & INTSTAT_FATAL_INT) {
		printf("%s: fatal error, resetting\n", sc->sc_dev.dv_xname);
		epic_init(sc);
	}

	/*
	 * Check for more interrupts.
	 */
	goto top;
}
833
834 /*
835 * Perform a soft reset on the EPIC.
836 */
837 void
838 epic_reset(sc)
839 struct epic_softc *sc;
840 {
841
842 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, 0);
843 delay(100);
844 bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_GENCTL, GENCTL_SOFTRESET);
845 delay(100);
846 }
847
848 /*
849 * Initialize the interface. Must be called at splimp().
850 */
/*
 * Initialize the interface.  Must be called at splimp().
 *
 * Performs a full stop/reset, programs the station address and
 * multicast filter, rebuilds the transmit and receive descriptor
 * rings, enables interrupts, and starts the receiver.
 */
void
epic_init(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	struct epic_txdesc *txd;
	struct epic_rxdesc *rxd;
	u_int32_t genctl, reg0;
	int i;

	/*
	 * Cancel any pending I/O.
	 */
	epic_stop(sc);

	/*
	 * Reset the EPIC to a known state.
	 */
	epic_reset(sc);

	/*
	 * Magical mystery initialization.
	 */
	bus_space_write_4(st, sh, EPIC_TEST, TEST_INIT);
	bus_space_write_4(st, sh, EPIC_TXTEST, 0);

	/*
	 * Initialize the EPIC genctl register:
	 *
	 *	- 64 byte receive FIFO threshold
	 *	- automatic advance to next receive frame
	 */
	genctl = GENCTL_RX_FIFO_THRESH0 | GENCTL_ONECOPY;
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);

	/*
	 * Reset the MII bus and PHY.  NVCTL is saved and restored
	 * around the operation.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_NVCTL);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0 | NVCTL_GPIO1 | NVCTL_GPOE1);
	bus_space_write_4(st, sh, EPIC_MIICFG, MIICFG_ENASER);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_RESET_PHY);
	delay(100);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl);
	delay(100);
	bus_space_write_4(st, sh, EPIC_NVCTL, reg0);

	/*
	 * Initialize Ethernet address.  Each LAN register takes two
	 * bytes, low byte in the low half.
	 */
	reg0 = enaddr[1] << 8 | enaddr[0];
	bus_space_write_4(st, sh, EPIC_LAN0, reg0);
	reg0 = enaddr[3] << 8 | enaddr[2];
	bus_space_write_4(st, sh, EPIC_LAN1, reg0);
	reg0 = enaddr[5] << 8 | enaddr[4];
	bus_space_write_4(st, sh, EPIC_LAN2, reg0);

	/*
	 * Set up the multicast hash table.
	 */
	epic_set_mchash(sc);

	/*
	 * Initialize receive control.  Remember the external buffer
	 * size setting.
	 */
	reg0 = bus_space_read_4(st, sh, EPIC_RXCON) &
	    (RXCON_EXTBUFSIZESEL1 | RXCON_EXTBUFSIZESEL0);
	reg0 |= (RXCON_RXMULTICAST | RXCON_RXBROADCAST);
	if (ifp->if_flags & IFF_PROMISC)
		reg0 |= RXCON_PROMISCMODE;
	bus_space_write_4(st, sh, EPIC_RXCON, reg0);

	/*
	 * XXX Media (full-duplex in TXCON).
	 */

	/*
	 * Initialize the transmit descriptors: each points at its
	 * fragment list and at the next descriptor, forming a ring.
	 */
	txd = sc->sc_control_data->ecd_txdescs;
	bzero(txd, sizeof(sc->sc_control_data->ecd_txdescs));
	for (i = 0; i < EPIC_NTXDESC; i++) {
		txd[i].et_control = ET_TXCTL_LASTDESC | ET_TXCTL_IAF |
		    ET_TXCTL_FRAGLIST;
		txd[i].et_bufaddr = sc->sc_cddma + EPIC_CDOFF(ecd_txfrags[i]);
		txd[i].et_nextdesc = sc->sc_cddma +
		    EPIC_CDOFF(ecd_txdescs[(i + 1) & EPIC_NTXDESC_MASK]);
	}

	/*
	 * Initialize the receive descriptors.  Note the buffers
	 * and control word have already been initialized; we only
	 * need to initialize the ring.
	 */
	rxd = sc->sc_control_data->ecd_rxdescs;
	for (i = 0; i < EPIC_NRXDESC; i++) {
		rxd[i].er_nextdesc = sc->sc_cddma +
		    EPIC_CDOFF(ecd_rxdescs[(i + 1) & EPIC_NRXDESC_MASK]);
	}

	/*
	 * Initialize the interrupt mask and enable interrupts.
	 */
	bus_space_write_4(st, sh, EPIC_INTMASK, INTMASK);
	bus_space_write_4(st, sh, EPIC_GENCTL, genctl | GENCTL_INTENA);

	/*
	 * Give the transmit and receive rings to the EPIC.
	 */
	bus_space_write_4(st, sh, EPIC_PTCDAR,
	    sc->sc_cddma + EPIC_CDOFF(ecd_txdescs[0]));
	bus_space_write_4(st, sh, EPIC_PRCDAR,
	    sc->sc_cddma + EPIC_CDOFF(ecd_rxdescs[0]));

	/*
	 * Initialize our ring pointers.  txlast is initialized to
	 * the end of the list so that it will wrap around to the
	 * first descriptor when the first packet is transmitted.
	 */
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = EPIC_NTXDESC - 1;

	sc->sc_rxptr = 0;

	/*
	 * Set the EPIC in motion.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND,
	    COMMAND_RXQUEUED | COMMAND_START_RX);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
}
992
993 /*
994 * Stop transmission on the interface.
995 */
/*
 * Stop transmission on the interface.
 *
 * Disables interrupts, halts both DMA engines and the receiver,
 * frees all queued transmit mbufs, and reloads the receive ring
 * with fresh buffers so a subsequent epic_init() finds it ready.
 */
void
epic_stop(sc)
	struct epic_softc *sc;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct epic_descsoft *ds;
	u_int32_t reg;
	int i;

	/*
	 * Disable interrupts.
	 */
	reg = bus_space_read_4(st, sh, EPIC_GENCTL);
	bus_space_write_4(st, sh, EPIC_GENCTL, reg & ~GENCTL_INTENA);
	bus_space_write_4(st, sh, EPIC_INTMASK, 0);

	/*
	 * Stop the DMA engine and take the receiver off-line.
	 */
	bus_space_write_4(st, sh, EPIC_COMMAND, COMMAND_STOP_RDMA |
	    COMMAND_STOP_TDMA | COMMAND_STOP_RX);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < EPIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
	sc->sc_txpending = 0;

	/*
	 * Release the receive buffers, then reallocate/reinitialize.
	 */
	for (i = 0; i < EPIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
		if (epic_add_rxbuf(sc, i) != 0) {
			/*
			 * This "can't happen" - we're at splimp()
			 * and we just freed the buffer we need
			 * above.
			 */
			panic("epic_stop: no buffers!");
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
1059
1060 /*
1061 * Read the EPIC Serial EEPROM.
1062 */
/*
 * Read the EPIC Serial EEPROM.
 *
 * Bit-bangs a READ command over the EECTL register for each of the
 * "wordcnt" 16-bit words starting at EEPROM word "word", storing the
 * results in "data".  Each bit is clocked by raising and lowering
 * EECTL_EESK, waiting for EECTL_EERDY between transitions.
 */
void
epic_read_eeprom(sc, word, wordcnt, data)
	struct epic_softc *sc;
	int word, wordcnt;
	u_int16_t *data;
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	u_int16_t reg;
	int i, x;

	/* Spin until the EEPROM reports ready. */
#define	EEPROM_WAIT_READY(st, sh) \
	while ((bus_space_read_4((st), (sh), EPIC_EECTL) & EECTL_EERDY) == 0) \
		/* nothing */

	/*
	 * Enable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
	EEPROM_WAIT_READY(st, sh);

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT for one clock tick. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE|EECTL_EECS);
		EEPROM_WAIT_READY(st, sh);

		/* Shift in the READ opcode, MSB first (3 bits). */
		for (x = 3; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if (EPIC_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift in address, MSB first (6 bits). */
		for (x = 6; x > 0; x--) {
			reg = EECTL_ENABLE|EECTL_EECS;
			if ((word + i) & (1 << (x - 1)))
				reg |= EECTL_EEDI;
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Shift out data, MSB first (16 bits). */
		reg = EECTL_ENABLE|EECTL_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, EPIC_EECTL, reg|EECTL_EESK);
			EEPROM_WAIT_READY(st, sh);
			if (bus_space_read_4(st, sh, EPIC_EECTL) & EECTL_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, EPIC_EECTL, reg);
			EEPROM_WAIT_READY(st, sh);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, EPIC_EECTL, EECTL_ENABLE);
		EEPROM_WAIT_READY(st, sh);
	}

	/*
	 * Disable the EEPROM.
	 */
	bus_space_write_4(st, sh, EPIC_EECTL, 0);

#undef EEPROM_WAIT_READY
}
1139
1140 /*
1141 * Add a receive buffer to the indicated descriptor.
1142 */
/*
 * Add a receive buffer to the indicated descriptor.
 *
 * Tries to allocate a fresh mbuf cluster for the slot; if allocation
 * fails and the slot already had a buffer, that buffer is recycled
 * (its data pointer reset to the start of the cluster) and ENOMEM is
 * returned so the caller knows the old data was not replaced.
 * Returns 0 on success, or ENOMEM when the old buffer was recycled;
 * if there was no old buffer either, ENOMEM is returned and the
 * descriptor is left untouched.
 */
int
epic_add_rxbuf(sc, idx)
	struct epic_softc *sc;
	int idx;
{
	struct epic_rxdesc *rxd = &sc->sc_control_data->ecd_rxdescs[idx];
	struct epic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m, *oldm;
	int error = 0;

	oldm = ds->ds_mbuf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster; fall back to recycling the old one. */
			error = ENOMEM;
			m_freem(m);
			if (oldm == NULL)
				return (error);
			m = oldm;
			/* Rewind past the alignment fudge applied below. */
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		/* No mbuf at all; recycle the old buffer if we have one. */
		error = ENOMEM;
		if (oldm == NULL)
			return (error);
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	ds->ds_mbuf = m;

	/*
	 * Set up the DMA map for this receive buffer.  Only needed when
	 * the buffer actually changed; a recycled buffer keeps its map.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
		    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
		if (error) {
			printf("%s: can't load rx buffer, error = %d\n",
			    sc->sc_dev.dv_xname, error);
			panic("epic_add_rxbuf");	/* XXX */
		}
	}

	/* Hand the buffer to the device. */
	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Move the data pointer up so that the incoming packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RX_ALIGNMENT_FUDGE;

	/*
	 * Initialize the receive descriptor and give it to the chip
	 * by setting the OWNER bit.
	 */
	rxd->er_bufaddr = ds->ds_dmamap->dm_segs[0].ds_addr +
	    RX_ALIGNMENT_FUDGE;
	rxd->er_buflength = m->m_ext.ext_size - RX_ALIGNMENT_FUDGE;
	rxd->er_control = 0;
	rxd->er_rxstatus = ER_RXSTAT_OWNER;

	return (error);
}
1211
1212 /*
1213 * Set the EPIC multicast hash table.
1214 */
/*
 * Set the EPIC multicast hash table.
 *
 * NOTE: the "#if 1" below unconditionally falls back to accept-all-
 * multicast (hardware bug workaround in 10Mb mode per the XXX
 * comment), so the per-address CRC filter code after it is currently
 * unreachable.
 */
void
epic_set_mchash(sc)
	struct epic_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t *cp;
	u_int32_t crc, mchash[4];
	int len;
	/* Nibble-at-a-time CRC-32 lookup table (reflected polynomial). */
	static const u_int32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 64 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bit selects the register, while the
	 * rest of the bits select the bit within the register.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

#if 1 /* XXX thorpej - hardware bug in 10Mb mode */
	goto allmulti;
#endif

	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			crc ^= *cp++;
			crc = (crc >> 4) ^ crctab[crc & 0xf];
			crc = (crc >> 4) ^ crctab[crc & 0xf];
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto sethash;

 allmulti:
	/* Accept everything: set all 16 valid bits in each register. */
	ifp->if_flags |= IFF_ALLMULTI;
	mchash[0] = mchash[1] = mchash[2] = mchash[3] = 0xffff;

 sethash:
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC0, mchash[0]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC1, mchash[1]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC2, mchash[2]);
	bus_space_write_4(sc->sc_st, sc->sc_sh, EPIC_MC3, mchash[3]);
}
1294