/*	$NetBSD: sgec.c,v 1.5 2000/06/04 02:14:14 matt Exp $ */
2 /*
3 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed at Ludd, University of
16 * Lule}, Sweden and its contributors.
17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
/*
 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
 * on for example the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (reading
 * the System Page Table directly) this driver doesn't do so, and there
 * is no benefit in doing it either in NetBSD of today.
 *
 * Things still left to do:
 *	Collect statistics.
 *	Use imperfect filtering when many multicast addresses.
 */
46
47 #include "opt_inet.h"
48 #include "bpfilter.h"
49
50 #include <sys/param.h>
51 #include <sys/mbuf.h>
52 #include <sys/socket.h>
53 #include <sys/device.h>
54 #include <sys/systm.h>
55 #include <sys/sockio.h>
56
57 #include <net/if.h>
58 #include <net/if_ether.h>
59 #include <net/if_dl.h>
60
61 #include <netinet/in.h>
62 #include <netinet/if_inarp.h>
63
64 #if NBPFILTER > 0
65 #include <net/bpf.h>
66 #include <net/bpfdesc.h>
67 #endif
68
69 #include <machine/bus.h>
70
71 #include <dev/ic/sgecreg.h>
72 #include <dev/ic/sgecvar.h>
73
/* Driver-internal function prototypes. */
static	void	zeinit __P((struct ze_softc *));
static	void	zestart __P((struct ifnet *));
static	int	zeioctl __P((struct ifnet *, u_long, caddr_t));
static	int	ze_add_rxbuf __P((struct ze_softc *, int));
static	void	ze_setup __P((struct ze_softc *));
static	void	zetimeout __P((struct ifnet *));
static	int	zereset __P((struct ze_softc *));

/*
 * CSR read/write helpers.  Note that both macros expand to references
 * to a local variable `sc' (struct ze_softc *) that must be in scope
 * in the calling function.
 */
#define ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
86
87 /*
88 * Interface exists: make available by filling in network interface
89 * record. System will initialize the interface when it is ready
90 * to accept packets.
91 */
92 void
93 sgec_attach(sc)
94 struct ze_softc *sc;
95 {
96 struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
97 struct ze_tdes *tp;
98 struct ze_rdes *rp;
99 bus_dma_segment_t seg;
100 int i, rseg, error;
101
102 /*
103 * Allocate DMA safe memory for descriptors and setup memory.
104 */
105 if ((error = bus_dmamem_alloc(sc->sc_dmat,
106 sizeof(struct ze_cdata), NBPG, 0, &seg, 1, &rseg,
107 BUS_DMA_NOWAIT)) != 0) {
108 printf(": unable to allocate control data, error = %d\n",
109 error);
110 goto fail_0;
111 }
112
113 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
114 sizeof(struct ze_cdata), (caddr_t *)&sc->sc_zedata,
115 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
116 printf(": unable to map control data, error = %d\n", error);
117 goto fail_1;
118 }
119
120 if ((error = bus_dmamap_create(sc->sc_dmat,
121 sizeof(struct ze_cdata), 1,
122 sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT,
123 &sc->sc_cmap)) != 0) {
124 printf(": unable to create control data DMA map, error = %d\n",
125 error);
126 goto fail_2;
127 }
128
129 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
130 sc->sc_zedata, sizeof(struct ze_cdata), NULL,
131 BUS_DMA_NOWAIT)) != 0) {
132 printf(": unable to load control data DMA map, error = %d\n",
133 error);
134 goto fail_3;
135 }
136
137 /*
138 * Zero the newly allocated memory.
139 */
140 bzero(sc->sc_zedata, sizeof(struct ze_cdata));
141 /*
142 * Create the transmit descriptor DMA maps.
143 */
144 for (i = 0; i < TXDESCS; i++) {
145 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
146 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
147 &sc->sc_xmtmap[i]))) {
148 printf(": unable to create tx DMA map %d, error = %d\n",
149 i, error);
150 goto fail_4;
151 }
152 }
153
154 /*
155 * Create receive buffer DMA maps.
156 */
157 for (i = 0; i < RXDESCS; i++) {
158 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
159 MCLBYTES, 0, BUS_DMA_NOWAIT,
160 &sc->sc_rcvmap[i]))) {
161 printf(": unable to create rx DMA map %d, error = %d\n",
162 i, error);
163 goto fail_5;
164 }
165 }
166 /*
167 * Pre-allocate the receive buffers.
168 */
169 for (i = 0; i < RXDESCS; i++) {
170 if ((error = ze_add_rxbuf(sc, i)) != 0) {
171 printf(": unable to allocate or map rx buffer %d\n,"
172 " error = %d\n", i, error);
173 goto fail_6;
174 }
175 }
176
177 /* For vmstat -i
178 */
179 evcnt_attach(&sc->sc_dev, "intr", &sc->sc_intrcnt);
180
181 /*
182 * Create ring loops of the buffer chains.
183 * This is only done once.
184 */
185 sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
186
187 rp = sc->sc_zedata->zc_recv;
188 rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
189 rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
190 rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
191
192 tp = sc->sc_zedata->zc_xmit;
193 tp[TXDESCS].ze_tdr = ZE_TDR_OW;
194 tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
195 tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
196
197 if (zereset(sc))
198 return;
199
200 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
201 ifp->if_softc = sc;
202 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
203 ifp->if_start = zestart;
204 ifp->if_ioctl = zeioctl;
205 ifp->if_watchdog = zetimeout;
206
207 /*
208 * Attach the interface.
209 */
210 if_attach(ifp);
211 ether_ifattach(ifp, sc->sc_enaddr);
212
213 #if NBPFILTER > 0
214 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
215 #endif
216 printf("\n%s: hardware address %s\n", sc->sc_dev.dv_xname,
217 ether_sprintf(sc->sc_enaddr));
218 return;
219
220 /*
221 * Free any resources we've allocated during the failed attach
222 * attempt. Do this in reverse order and fall through.
223 */
224 fail_6:
225 for (i = 0; i < RXDESCS; i++) {
226 if (sc->sc_rxmbuf[i] != NULL) {
227 bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
228 m_freem(sc->sc_rxmbuf[i]);
229 }
230 }
231 fail_5:
232 for (i = 0; i < RXDESCS; i++) {
233 if (sc->sc_xmtmap[i] != NULL)
234 bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
235 }
236 fail_4:
237 for (i = 0; i < TXDESCS; i++) {
238 if (sc->sc_rcvmap[i] != NULL)
239 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
240 }
241 bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
242 fail_3:
243 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
244 fail_2:
245 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_zedata,
246 sizeof(struct ze_cdata));
247 fail_1:
248 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
249 fail_0:
250 return;
251 }
252
253 /*
254 * Initialization of interface.
255 */
256 void
257 zeinit(sc)
258 struct ze_softc *sc;
259 {
260 struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
261 struct ze_cdata *zc = sc->sc_zedata;
262 int i;
263
264 /*
265 * Reset the interface.
266 */
267 if (zereset(sc))
268 return;
269
270 sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
271 /*
272 * Release and init transmit descriptors.
273 */
274 for (i = 0; i < TXDESCS; i++) {
275 if (sc->sc_txmbuf[i]) {
276 bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
277 m_freem(sc->sc_txmbuf[i]);
278 sc->sc_txmbuf[i] = 0;
279 }
280 zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
281 }
282
283
284 /*
285 * Init receive descriptors.
286 */
287 for (i = 0; i < RXDESCS; i++)
288 zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
289 sc->sc_nextrx = 0;
290
291 ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
292 ZE_NICSR6_SR|ZE_NICSR6_DC);
293
294 ifp->if_flags |= IFF_RUNNING;
295 ifp->if_flags &= ~IFF_OACTIVE;
296
297 /*
298 * Send a setup frame.
299 * This will start the transmit machinery as well.
300 */
301 ze_setup(sc);
302
303 }
304
/*
 * Start output on interface.  Dequeues packets from the interface
 * send queue and maps them onto the transmit descriptor ring, doing
 * DMA directly from the mbuf chain (one descriptor per non-empty
 * mbuf).  Installed as ifp->if_start.
 */
void
zestart(ifp)
	struct ifnet *ifp;
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	int old_inq = sc->sc_inq;
	short orword;

	s = splimp();
	while (sc->sc_inq < (TXDESCS - 1)) {

		/* A deferred setup frame takes priority over data. */
		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("zestart"); /* XXX */

		/* Not enough free descriptors; requeue and stall output. */
		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			IF_PREPEND(&sc->sc_if.if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			/*
			 * NOTE(review): the return value of
			 * bus_dmamap_load() is stored but never checked;
			 * a load failure would leave a stale address in
			 * `buffer'.  Verify this can't happen here.
			 */
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == len)
				orword = ZE_TDES1_FS;	/* first segment */
			if (totlen == m->m_pkthdr.len) {
				/* Last segment: pad runt frames. */
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[idx] = m;
			}
			zc->zc_xmit[idx].ze_bufsize = len;
			zc->zc_xmit[idx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[idx].ze_tdes1 = orword | ZE_TDES1_IC;
			zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW; /* chip owns it */

			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}
404
/*
 * Interrupt service routine.  Drains the receive ring (passing frames
 * up the stack) and reclaims completed transmit descriptors.
 * Returns 0 if the interrupt was not generated by this device.
 */
int
sgec_intr(sc)
	struct ze_softc *sc;
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct ether_header *eh;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) /* Wasn't we */
		return 0;
	ZE_WCSR(ZE_CSR5, csr);	/* acknowledge by writing the status back */

	if (csr & ZE_NICSR5_RI)
		/* Process every descriptor the chip has handed back. */
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			/*
			 * Attach a fresh buffer to the descriptor; the old
			 * one (m) goes up the stack below.
			 * NOTE(review): the return value is ignored -- on
			 * ENOBUFS sc_rxmbuf still points at m, which is
			 * also passed upstream; verify this path.
			 */
			ze_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			eh = mtod(m, struct ether_header *);
#if NBPFILTER > 0
			if (ifp->if_bpf) {
				bpf_mtap(ifp->if_bpf, m);
				/*
				 * In promiscuous mode, frames that are
				 * neither multicast nor for our station
				 * address are dropped after BPF has seen
				 * them.
				 */
				if ((ifp->if_flags & IFF_PROMISC) != 0 &&
				    ((eh->ether_dhost[0] & 1) == 0) &&
				    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
				    ETHER_ADDR_LEN) != 0) {
					m_freem(m);
					continue;
				}
			}
#endif
			/*
			 * ALLMULTI means PROMISC in this driver.
			 */
			if ((ifp->if_flags & IFF_ALLMULTI) &&
			    ((eh->ether_dhost[0] & 1) == 0) &&
			    bcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
			}
			(*ifp->if_input)(ifp, m);
		}

	if (csr & ZE_NICSR5_TI) {
		/* Reclaim descriptors the chip has finished with. */
		while ((zc->zc_xmit[sc->sc_lastack].ze_tdr & ZE_TDR_OW) == 0) {
			int idx = sc->sc_lastack;

			if (sc->sc_lastack == sc->sc_nexttx)
				break;
			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* Setup frames have no mbuf and aren't counted. */
			if ((zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP)
				continue;
			/* XXX collect statistics */
			if (zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_LS)
				ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		if (sc->sc_inq == 0)
			ifp->if_timer = 0;	/* cancel watchdog */
		ifp->if_flags &= ~IFF_OACTIVE;
		zestart(ifp); /* Put in more in queue */
	}
	return 1;
}
487
488 /*
489 * Process an ioctl request.
490 */
491 int
492 zeioctl(ifp, cmd, data)
493 struct ifnet *ifp;
494 u_long cmd;
495 caddr_t data;
496 {
497 struct ze_softc *sc = ifp->if_softc;
498 struct ifreq *ifr = (struct ifreq *)data;
499 struct ifaddr *ifa = (struct ifaddr *)data;
500 int s = splnet(), error = 0;
501
502 switch (cmd) {
503
504 case SIOCSIFADDR:
505 ifp->if_flags |= IFF_UP;
506 switch(ifa->ifa_addr->sa_family) {
507 #ifdef INET
508 case AF_INET:
509 zeinit(sc);
510 arp_ifinit(ifp, ifa);
511 break;
512 #endif
513 }
514 break;
515
516 case SIOCSIFFLAGS:
517 if ((ifp->if_flags & IFF_UP) == 0 &&
518 (ifp->if_flags & IFF_RUNNING) != 0) {
519 /*
520 * If interface is marked down and it is running,
521 * stop it. (by disabling receive mechanism).
522 */
523 ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
524 ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
525 ifp->if_flags &= ~IFF_RUNNING;
526 } else if ((ifp->if_flags & IFF_UP) != 0 &&
527 (ifp->if_flags & IFF_RUNNING) == 0) {
528 /*
529 * If interface it marked up and it is stopped, then
530 * start it.
531 */
532 zeinit(sc);
533 } else if ((ifp->if_flags & IFF_UP) != 0) {
534 /*
535 * Send a new setup packet to match any new changes.
536 * (Like IFF_PROMISC etc)
537 */
538 ze_setup(sc);
539 }
540 break;
541
542 case SIOCADDMULTI:
543 case SIOCDELMULTI:
544 /*
545 * Update our multicast list.
546 */
547 error = (cmd == SIOCADDMULTI) ?
548 ether_addmulti(ifr, &sc->sc_ec):
549 ether_delmulti(ifr, &sc->sc_ec);
550
551 if (error == ENETRESET) {
552 /*
553 * Multicast list has changed; set the hardware filter
554 * accordingly.
555 */
556 ze_setup(sc);
557 error = 0;
558 }
559 break;
560
561 default:
562 error = EINVAL;
563
564 }
565 splx(s);
566 return (error);
567 }
568
569 /*
570 * Add a receive buffer to the indicated descriptor.
571 */
572 int
573 ze_add_rxbuf(sc, i)
574 struct ze_softc *sc;
575 int i;
576 {
577 struct mbuf *m;
578 struct ze_rdes *rp;
579 int error;
580
581 MGETHDR(m, M_DONTWAIT, MT_DATA);
582 if (m == NULL)
583 return (ENOBUFS);
584
585 MCLGET(m, M_DONTWAIT);
586 if ((m->m_flags & M_EXT) == 0) {
587 m_freem(m);
588 return (ENOBUFS);
589 }
590
591 if (sc->sc_rxmbuf[i] != NULL)
592 bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
593
594 error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
595 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
596 if (error)
597 panic("%s: can't load rx DMA map %d, error = %d\n",
598 sc->sc_dev.dv_xname, i, error);
599 sc->sc_rxmbuf[i] = m;
600
601 bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
602 sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
603
604 /*
605 * We know that the mbuf cluster is page aligned. Also, be sure
606 * that the IP header will be longword aligned.
607 */
608 m->m_data += 2;
609 rp = &sc->sc_zedata->zc_recv[i];
610 rp->ze_bufsize = (m->m_ext.ext_size - 2);
611 rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
612 rp->ze_framelen = ZE_FRAMELEN_OW;
613
614 return (0);
615 }
616
/*
 * Create a setup packet and put in queue for sending.  Also updates
 * the chip's address-filter mode (promiscuous / all-multicast) in
 * CSR6 as dictated by the interface flags and multicast list.
 */
void
ze_setup(sc)
	struct ze_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int j, idx, s, reg;

	s = splimp();
	/*
	 * No free transmit descriptor: remember that a setup is
	 * pending and let zestart() retry when the ring drains.
	 */
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	bcopy(enaddr, zc->zc_setup, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		/*
		 * An address range can't be matched by perfect
		 * filtering; fall back to receiving all multicast.
		 */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		bcopy(enm->enm_addrlo, &zc->zc_setup[j], ETHER_ADDR_LEN);
		j += 8;		/* next 8-byte slot in the setup frame */
		ETHER_NEXT_MULTI(step, enm);
		/* Setup frame full (128 bytes) with more to go: ALLMULTI. */
		if ((enm != NULL)&& (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;	/* chip owns it */

		/* Kick the transmit logic if it is stopped. */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
	splx(s);
}
697
698 /*
699 * Check for dead transmit logic.
700 */
701 void
702 zetimeout(ifp)
703 struct ifnet *ifp;
704 {
705 struct ze_softc *sc = ifp->if_softc;
706
707 if (sc->sc_inq == 0)
708 return;
709
710 printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
711 /*
712 * Do a reset of interface, to get it going again.
713 * Will it work by just restart the transmit logic?
714 */
715 zeinit(sc);
716 }
717
718 /*
719 * Reset chip:
720 * Set/reset the reset flag.
721 * Write interrupt vector.
722 * Write ring buffer addresses.
723 * Write SBR.
724 */
725 int
726 zereset(sc)
727 struct ze_softc *sc;
728 {
729 int reg, i, s;
730
731 ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
732 DELAY(50000);
733 if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
734 printf("%s: selftest failed\n", sc->sc_dev.dv_xname);
735 return 1;
736 }
737
738 /*
739 * Get the vector that were set at match time, and remember it.
740 * WHICH VECTOR TO USE? Take one unused. XXX
741 * Funny way to set vector described in the programmers manual.
742 */
743 reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
744 i = 10;
745 s = splimp();
746 do {
747 if (i-- == 0) {
748 printf("Failing SGEC CSR0 init\n");
749 splx(s);
750 return 1;
751 }
752 ZE_WCSR(ZE_CSR0, reg);
753 } while (ZE_RCSR(ZE_CSR0) != reg);
754 splx(s);
755
756 ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
757 ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
758 return 0;
759 }
760