/*	$NetBSD: sgec.c,v 1.18 2001/11/13 13:14:44 lukem Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
 * on for example the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (it can read
 * the System Page Table directly), this driver doesn't do so, and there
 * is no benefit in doing it in NetBSD today either.
 *
 * Things still left to do:
 *	Collect statistics.
 *	Use imperfect filtering when many multicast addresses.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.18 2001/11/13 13:14:44 lukem Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/ic/sgecreg.h>
#include <dev/ic/sgecvar.h>

static	void	zeinit __P((struct ze_softc *));
static	void	zestart __P((struct ifnet *));
static	int	zeioctl __P((struct ifnet *, u_long, caddr_t));
static	int	ze_add_rxbuf __P((struct ze_softc *, int));
static	void	ze_setup __P((struct ze_softc *));
static	void	zetimeout __P((struct ifnet *));
static	int	zereset __P((struct ze_softc *));

#define	ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define	ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(sc)
	struct ze_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ze_tdes *tp;
	struct ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ze_cdata), (caddr_t *)&sc->sc_zedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_zedata, sizeof(struct ze_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
	/*
	 * Create the transmit descriptor DMA maps.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = ze_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/* For vmstat -i */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    sc->sc_dev.dv_xname, "intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
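	 * The extra descriptor at the end of each list is a chain-address
	 * (CA) descriptor whose buffer address points back to the start of
	 * the list, which is what turns the descriptor arrays into rings.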
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf("\n%s: hardware address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
zeinit(sc)
	struct ze_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ze_cdata *zc = sc->sc_zedata;
	int i;

	/*
	 * Reset the interface.
	 */
	if (zereset(sc))
		return;

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		zc->zc_xmit[i].ze_tdr = 0;	/* Clear valid bit */
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
	sc->sc_nextrx = 0;

	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
	    ZE_NICSR6_SR|ZE_NICSR6_DC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	ze_setup(sc);
}

/*
 * Start output on interface.
 */
void
zestart(ifp)
	struct ifnet *ifp;
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t buffer;
	struct mbuf *m, *m0;
	int idx, len, i, totlen, error;
	int old_inq = sc->sc_inq;
	short orword;

	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why
		 * the transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("zestart"); /* XXX */

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
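		 * Each mbuf that carries data gets its own transmit
		 * descriptor: the first segment of the frame is flagged
		 * FS, the last one LS, and IC requests an interrupt once
		 * the frame has been transmitted.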
		 */
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, BUS_DMA_WRITE);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
			len = m0->m_len;
			if (len == 0)
				continue;

			totlen += len;
			/*
			 * Set the first/last segment flags; pad the last
			 * segment out to the minimum Ethernet frame length.
			 */
			orword = 0;
			if (totlen == len)
				orword = ZE_TDES1_FS;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[idx] = m;
			}
			zc->zc_xmit[idx].ze_bufsize = len;
			zc->zc_xmit[idx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[idx].ze_tdes1 = orword | ZE_TDES1_IC;
			zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}

int
sgec_intr(sc)
	struct ze_softc *sc;
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0)	/* Not our interrupt */
		return 0;
	ZE_WCSR(ZE_CSR5, csr);

	if (csr & ZE_NICSR5_RI)
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			ze_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			m->m_flags |= M_HASFCS;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			(*ifp->if_input)(ifp, m);
		}

	if (csr & ZE_NICSR5_TI) {
		while ((zc->zc_xmit[sc->sc_lastack].ze_tdr & ZE_TDR_OW) == 0) {
			int idx = sc->sc_lastack;

			if (sc->sc_lastack == sc->sc_nexttx)
				break;
			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			if ((zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP)
				continue;
			/* XXX collect statistics */
			if (zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_LS)
				ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		if (sc->sc_inq == 0)
			ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		zestart(ifp); /* Queue up more packets, if any */
	}
	return 1;
}

/*
 * Process an ioctl request.
 */
int
zeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ze_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			zeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			zeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes.
			 * (Like IFF_PROMISC etc.)
			 */
			ze_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ec) :
		    ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ze_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(sc, i)
	struct ze_softc *sc;
	int i;
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned.  Also, be sure
	 * that the IP header will be longword aligned.
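	 * Skipping the first two bytes makes the 14-byte Ethernet header
	 * end on a longword boundary, so the IP header that follows it is
	 * longword aligned; the same two-byte offset is applied to the DMA
	 * address written into the descriptor below.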
	 */
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
ze_setup(sc)
	struct ze_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup));	/* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);
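
	/*
	 * The 128-byte setup buffer is filled as 16 eight-byte address
	 * slots: the station address goes into the first slot, the 0xff
	 * fill leaves the remaining slots as broadcast, and the loop below
	 * copies multicast addresses in from offset 16 at an 8-byte stride.
	 */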

	/*
	 * Multicast handling.  The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL) && (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);	/* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}

/*
 * Check for dead transmit logic.
 */
void
zetimeout(ifp)
	struct ifnet *ifp;
{
	struct ze_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Reset the interface to get it going again.
	 * Would it be enough to just restart the transmit logic?
	 */
	zeinit(sc);
}

/*
 * Reset chip:
 * Set/reset the reset flag.
 * Write interrupt vector.
 * Write ring buffer addresses.
 * Write SBR.
 */
int
zereset(sc)
	struct ze_softc *sc;
{
	int reg, i;

	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		printf("%s: selftest failed\n", sc->sc_dev.dv_xname);
		return 1;
	}

	/*
	 * Get the vector that was set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set vector described in the programmers manual.
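	 * The loop below rewrites CSR0 until the register reads back with
	 * the value just written, giving up after ten attempts.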
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	do {
		if (i-- == 0) {
			printf("Failing SGEC CSR0 init\n");
			return 1;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return 0;
}