/*	$NetBSD: sgec.c,v 1.11 2000/12/14 06:27:26 thorpej Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SGEC (Second Generation Ethernet Controller), found
 * on, for example, the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (it can
 * read the System Page Table directly) this driver doesn't do so, and
 * there would be no benefit in doing so in NetBSD today either.
 *
 * Things still to do:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/ic/sgecreg.h>
#include <dev/ic/sgecvar.h>

static void zeinit __P((struct ze_softc *));
static void zestart __P((struct ifnet *));
static int zeioctl __P((struct ifnet *, u_long, caddr_t));
static int ze_add_rxbuf __P((struct ze_softc *, int));
static void ze_setup __P((struct ze_softc *));
static void zetimeout __P((struct ifnet *));
static int zereset __P((struct ze_softc *));

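/*
 * CSR access helpers.  Note that they expect a ``struct ze_softc *sc''
 * to be in scope at the point of use.
 */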
#define ZE_WCSR(csr, val) \
        bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define ZE_RCSR(csr) \
        bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(sc)
        struct ze_softc *sc;
{
        struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
        struct ze_tdes *tp;
        struct ze_rdes *rp;
        bus_dma_segment_t seg;
        int i, rseg, error;

        /*
         * Allocate DMA safe memory for descriptors and setup memory.
         */
        if ((error = bus_dmamem_alloc(sc->sc_dmat,
            sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg,
            BUS_DMA_NOWAIT)) != 0) {
                printf(": unable to allocate control data, error = %d\n",
                    error);
                goto fail_0;
        }

        if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
            sizeof(struct ze_cdata), (caddr_t *)&sc->sc_zedata,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
                printf(": unable to map control data, error = %d\n", error);
                goto fail_1;
        }

        if ((error = bus_dmamap_create(sc->sc_dmat,
            sizeof(struct ze_cdata), 1,
            sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT,
            &sc->sc_cmap)) != 0) {
                printf(": unable to create control data DMA map, error = %d\n",
                    error);
                goto fail_2;
        }

        if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
            sc->sc_zedata, sizeof(struct ze_cdata), NULL,
            BUS_DMA_NOWAIT)) != 0) {
                printf(": unable to load control data DMA map, error = %d\n",
                    error);
                goto fail_3;
        }

        /*
         * Zero the newly allocated memory.
         */
        bzero(sc->sc_zedata, sizeof(struct ze_cdata));

        /*
         * Create the transmit descriptor DMA maps.
         */
        for (i = 0; i < TXDESCS; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
                    &sc->sc_xmtmap[i]))) {
                        printf(": unable to create tx DMA map %d, error = %d\n",
                            i, error);
                        goto fail_4;
                }
        }

        /*
         * Create receive buffer DMA maps.
         */
        for (i = 0; i < RXDESCS; i++) {
                if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
                    MCLBYTES, 0, BUS_DMA_NOWAIT,
                    &sc->sc_rcvmap[i]))) {
                        printf(": unable to create rx DMA map %d, error = %d\n",
                            i, error);
                        goto fail_5;
                }
        }

        /*
         * Pre-allocate the receive buffers.
         */
        for (i = 0; i < RXDESCS; i++) {
                if ((error = ze_add_rxbuf(sc, i)) != 0) {
                        printf(": unable to allocate or map rx buffer %d,"
                            " error = %d\n", i, error);
                        goto fail_6;
                }
        }

        /* For vmstat -i */
        evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
            sc->sc_dev.dv_xname, "intr");

        /*
         * Create ring loops of the buffer chains.
         * This is only done once.
         */
        sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

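        /*
         * The extra descriptor at the end of each ring carries no buffer;
         * it is a chain-address descriptor whose buffer address points
         * back to the physical start of the ring, which is what makes the
         * descriptor lists circular.
         */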
        rp = sc->sc_zedata->zc_recv;
        rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
        rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
        rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

        tp = sc->sc_zedata->zc_xmit;
        tp[TXDESCS].ze_tdr = ZE_TDR_OW;
        tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
        tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

        if (zereset(sc))
                return;

        strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_start = zestart;
        ifp->if_ioctl = zeioctl;
        ifp->if_watchdog = zetimeout;
        IFQ_SET_READY(&ifp->if_snd);

        /*
         * Attach the interface.
         */
        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);

        printf("\n%s: hardware address %s\n", sc->sc_dev.dv_xname,
            ether_sprintf(sc->sc_enaddr));
        return;

        /*
         * Free any resources we've allocated during the failed attach
         * attempt.  Do this in reverse order and fall through.
         */
 fail_6:
        for (i = 0; i < RXDESCS; i++) {
                if (sc->sc_rxmbuf[i] != NULL) {
                        bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
                        m_freem(sc->sc_rxmbuf[i]);
                }
        }
 fail_5:
        for (i = 0; i < RXDESCS; i++) {
                if (sc->sc_rcvmap[i] != NULL)
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
        }
 fail_4:
        for (i = 0; i < TXDESCS; i++) {
                if (sc->sc_xmtmap[i] != NULL)
                        bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
        }
        bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
        bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
        bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_zedata,
            sizeof(struct ze_cdata));
 fail_1:
        bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
        return;
}

/*
 * Initialization of interface.
 */
void
zeinit(sc)
        struct ze_softc *sc;
{
        struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
        struct ze_cdata *zc = sc->sc_zedata;
        int i;

        /*
         * Reset the interface.
         */
        if (zereset(sc))
                return;

        sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;

        /*
         * Release and init transmit descriptors.
         */
        for (i = 0; i < TXDESCS; i++) {
                if (sc->sc_txmbuf[i]) {
                        bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
                        m_freem(sc->sc_txmbuf[i]);
                        sc->sc_txmbuf[i] = 0;
                }
                zc->zc_xmit[i].ze_tdr = 0;      /* Clear valid bit */
        }

        /*
         * Init receive descriptors.
         */
        for (i = 0; i < RXDESCS; i++)
                zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
        sc->sc_nextrx = 0;

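        /*
         * Enable interrupts and start both the transmitter and the
         * receiver (ST/SR), with a DMA burst length of 8 (BL_8).
         */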
        ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
            ZE_NICSR6_SR|ZE_NICSR6_DC);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;

        /*
         * Send a setup frame.
         * This will start the transmit machinery as well.
         */
        ze_setup(sc);
}

/*
 * Start output on interface.
 */
void
zestart(ifp)
        struct ifnet *ifp;
{
        struct ze_softc *sc = ifp->if_softc;
        struct ze_cdata *zc = sc->sc_zedata;
        paddr_t buffer;
        struct mbuf *m, *m0;
        int idx, len, s, i, totlen, error;
        int old_inq = sc->sc_inq;
        short orword;

        s = splimp();
        while (sc->sc_inq < (TXDESCS - 1)) {

                if (sc->sc_setup) {
                        ze_setup(sc);
                        continue;
                }
                idx = sc->sc_nexttx;
                IFQ_POLL(&sc->sc_if.if_snd, m);
                if (m == 0)
                        goto out;
                /*
                 * Count the number of mbufs in the chain.
                 * We always do DMA directly from the mbufs, which is why
                 * the transmit ring is so big.
                 */
                for (m0 = m, i = 0; m0; m0 = m0->m_next)
                        if (m0->m_len)
                                i++;
                if (i >= TXDESCS)
                        panic("zestart"); /* XXX */

                if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
                        ifp->if_flags |= IFF_OACTIVE;
                        goto out;
                }

#if NBPFILTER > 0
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif
                /*
                 * m now points to an mbuf chain that can be loaded.
                 * Walk the chain and fill in the descriptors.
                 */
                totlen = 0;
                for (m0 = m; m0; m0 = m0->m_next) {
                        error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
                            mtod(m0, void *), m0->m_len, 0, 0);
                        buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
                        len = m0->m_len;
                        if (len == 0)
                                continue;

                        totlen += len;
                        /*
                         * Set the first/last segment bits, and pad the
                         * last segment of a runt frame out to the minimum
                         * Ethernet length.
                         */
                        orword = 0;
                        if (totlen == len)
                                orword = ZE_TDES1_FS;
                        if (totlen == m->m_pkthdr.len) {
                                if (totlen < ETHER_MIN_LEN)
                                        len += (ETHER_MIN_LEN - totlen);
                                orword |= ZE_TDES1_LS;
                                sc->sc_txmbuf[idx] = m;
                        }
                        zc->zc_xmit[idx].ze_bufsize = len;
                        zc->zc_xmit[idx].ze_bufaddr = (char *)buffer;
                        zc->zc_xmit[idx].ze_tdes1 = orword | ZE_TDES1_IC;
                        zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

                        if (++idx == TXDESCS)
                                idx = 0;
                        sc->sc_inq++;
                }
                IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
                if (totlen != m->m_pkthdr.len)
                        panic("zestart: len fault");
#endif

                /*
                 * Kick off the transmit logic, if it is stopped.
                 */
                if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
                        ZE_WCSR(ZE_CSR1, -1);
                sc->sc_nexttx = idx;
        }
        if (sc->sc_inq == (TXDESCS - 1))
                ifp->if_flags |= IFF_OACTIVE;

out:    if (old_inq < sc->sc_inq)
                ifp->if_timer = 5; /* If transmit logic dies */
        splx(s);
}

int
sgec_intr(sc)
        struct ze_softc *sc;
{
        struct ze_cdata *zc = sc->sc_zedata;
        struct ifnet *ifp = &sc->sc_if;
        struct mbuf *m;
        int csr, len;

        csr = ZE_RCSR(ZE_CSR5);
        if ((csr & ZE_NICSR5_IS) == 0)  /* Not our interrupt */
                return 0;
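        /* Acknowledge the interrupt by writing the status bits back. */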
        ZE_WCSR(ZE_CSR5, csr);

        if (csr & ZE_NICSR5_RI)
                while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
                    ZE_FRAMELEN_OW) == 0) {

                        ifp->if_ipackets++;
                        m = sc->sc_rxmbuf[sc->sc_nextrx];
                        len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
                        ze_add_rxbuf(sc, sc->sc_nextrx);
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = m->m_len = len;
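                        /*
                         * The frame still has the FCS at the end;
                         * M_HASFCS tells the upper layers to trim it.
                         */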
                        m->m_flags |= M_HASFCS;
                        if (++sc->sc_nextrx == RXDESCS)
                                sc->sc_nextrx = 0;
#if NBPFILTER > 0
                        if (ifp->if_bpf)
                                bpf_mtap(ifp->if_bpf, m);
#endif
                        (*ifp->if_input)(ifp, m);
                }

        if (csr & ZE_NICSR5_TI) {
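                /*
                 * Reclaim transmit descriptors that the chip has given
                 * back to us (OWN bit cleared), skipping setup frames.
                 */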
                while ((zc->zc_xmit[sc->sc_lastack].ze_tdr & ZE_TDR_OW) == 0) {
                        int idx = sc->sc_lastack;

                        if (sc->sc_lastack == sc->sc_nexttx)
                                break;
                        sc->sc_inq--;
                        if (++sc->sc_lastack == TXDESCS)
                                sc->sc_lastack = 0;

                        if ((zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_DT) ==
                            ZE_TDES1_DT_SETUP)
                                continue;
                        /* XXX collect statistics */
                        if (zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_LS)
                                ifp->if_opackets++;
                        bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
                        if (sc->sc_txmbuf[idx]) {
                                m_freem(sc->sc_txmbuf[idx]);
                                sc->sc_txmbuf[idx] = 0;
                        }
                }
                if (sc->sc_inq == 0)
                        ifp->if_timer = 0;
                ifp->if_flags &= ~IFF_OACTIVE;
                zestart(ifp); /* Queue up more packets */
        }
        return 1;
}

/*
 * Process an ioctl request.
 */
int
zeioctl(ifp, cmd, data)
        struct ifnet *ifp;
        u_long cmd;
        caddr_t data;
{
        struct ze_softc *sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        struct ifaddr *ifa = (struct ifaddr *)data;
        int s = splnet(), error = 0;

        switch (cmd) {

        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                switch (ifa->ifa_addr->sa_family) {
#ifdef INET
                case AF_INET:
                        zeinit(sc);
                        arp_ifinit(ifp, ifa);
                        break;
#endif
                }
                break;

        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0 &&
                    (ifp->if_flags & IFF_RUNNING) != 0) {
                        /*
                         * If interface is marked down and it is running,
                         * stop it by disabling the receive mechanism.
                         */
                        ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
                            ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
                        ifp->if_flags &= ~IFF_RUNNING;
                } else if ((ifp->if_flags & IFF_UP) != 0 &&
                    (ifp->if_flags & IFF_RUNNING) == 0) {
                        /*
                         * If interface is marked up and it is stopped, then
                         * start it.
                         */
                        zeinit(sc);
                } else if ((ifp->if_flags & IFF_UP) != 0) {
                        /*
                         * Send a new setup packet to match any new changes.
                         * (Like IFF_PROMISC etc.)
                         */
                        ze_setup(sc);
                }
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                /*
                 * Update our multicast list.
                 */
                error = (cmd == SIOCADDMULTI) ?
                    ether_addmulti(ifr, &sc->sc_ec):
                    ether_delmulti(ifr, &sc->sc_ec);

                if (error == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        ze_setup(sc);
                        error = 0;
                }
                break;

        default:
                error = EINVAL;

        }
        splx(s);
        return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(sc, i)
        struct ze_softc *sc;
        int i;
{
        struct mbuf *m;
        struct ze_rdes *rp;
        int error;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (ENOBUFS);

        MCLGET(m, M_DONTWAIT);
        if ((m->m_flags & M_EXT) == 0) {
                m_freem(m);
                return (ENOBUFS);
        }

        if (sc->sc_rxmbuf[i] != NULL)
                bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

        error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
            m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
        if (error)
                panic("%s: can't load rx DMA map %d, error = %d",
                    sc->sc_dev.dv_xname, i, error);
        sc->sc_rxmbuf[i] = m;

        bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
            sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

        /*
         * We know that the mbuf cluster is page aligned. Also, be sure
         * that the IP header will be longword aligned.
         */
        m->m_data += 2;
        rp = &sc->sc_zedata->zc_recv[i];
        rp->ze_bufsize = (m->m_ext.ext_size - 2);
        rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
        rp->ze_framelen = ZE_FRAMELEN_OW;

        return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
ze_setup(sc)
        struct ze_softc *sc;
{
        struct ether_multi *enm;
        struct ether_multistep step;
        struct ze_cdata *zc = sc->sc_zedata;
        struct ifnet *ifp = &sc->sc_if;
        u_int8_t *enaddr = LLADDR(ifp->if_sadl);
        int j, idx, s, reg;

        s = splimp();
        if (sc->sc_inq == (TXDESCS - 1)) {
                sc->sc_setup = 1;
                splx(s);
                return;
        }
        sc->sc_setup = 0;

        /*
         * Init the setup packet with valid info.
         */
        memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
        bcopy(enaddr, zc->zc_setup, ETHER_ADDR_LEN);
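        /*
         * The 128-byte setup frame holds the perfect address filter;
         * slots that are not filled in below are left as the broadcast
         * address (all ones).
         */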

        /*
         * Multicast handling. The SGEC can handle up to 16 direct
         * ethernet addresses.
         */
        j = 16;
        ifp->if_flags &= ~IFF_ALLMULTI;
        ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
        while (enm != NULL) {
                if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
                        ifp->if_flags |= IFF_ALLMULTI;
                        break;
                }
                bcopy(enm->enm_addrlo, &zc->zc_setup[j], ETHER_ADDR_LEN);
                j += 8;
                ETHER_NEXT_MULTI(step, enm);
                if ((enm != NULL) && (j == 128)) {
                        ifp->if_flags |= IFF_ALLMULTI;
                        break;
                }
        }

        /*
         * ALLMULTI implies PROMISC in this driver.
         */
        if (ifp->if_flags & IFF_ALLMULTI)
                ifp->if_flags |= IFF_PROMISC;
        else if (ifp->if_pcount == 0)
                ifp->if_flags &= ~IFF_PROMISC;

        /*
         * Update the receive address filtering mode. The receiver is
         * stopped while the mode is changed.
         */
        reg = ZE_RCSR(ZE_CSR6);
        DELAY(10);
        ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR);  /* Stop rx */
        reg &= ~ZE_NICSR6_AF;
        if (ifp->if_flags & IFF_PROMISC)
                reg |= ZE_NICSR6_AF_PROM;
        else if (ifp->if_flags & IFF_ALLMULTI)
                reg |= ZE_NICSR6_AF_ALLM;
        DELAY(10);
        ZE_WCSR(ZE_CSR6, reg);

        /*
         * Only send a setup packet if needed.
         */
        if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
                idx = sc->sc_nexttx;
                zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
                zc->zc_xmit[idx].ze_bufsize = 128;
                zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
                zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

                if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
                        ZE_WCSR(ZE_CSR1, -1);

                sc->sc_inq++;
                if (++sc->sc_nexttx == TXDESCS)
                        sc->sc_nexttx = 0;
        }
        splx(s);
}

/*
 * Check for dead transmit logic.
 */
void
zetimeout(ifp)
        struct ifnet *ifp;
{
        struct ze_softc *sc = ifp->if_softc;

        if (sc->sc_inq == 0)
                return;

        printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);

        /*
         * Do a reset of the interface, to get it going again.
         * Would it be enough to just restart the transmit logic?
         */
        zeinit(sc);
}

/*
 * Reset chip:
 *	Set/reset the reset flag.
 *	Write interrupt vector.
 *	Write ring buffer addresses.
 *	Write SBR.
 */
int
zereset(sc)
        struct ze_softc *sc;
{
        int reg, i, s;

        ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
        DELAY(50000);
        if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
                printf("%s: selftest failed\n", sc->sc_dev.dv_xname);
                return 1;
        }

        /*
         * Get the vector that was set at match time, and remember it.
         * WHICH VECTOR TO USE? Take one unused. XXX
         * This funny way of setting the vector is described in the
         * programmer's manual.
         */
        reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
        i = 10;
        s = splimp();
        do {
                if (i-- == 0) {
                        printf("Failing SGEC CSR0 init\n");
                        splx(s);
                        return 1;
                }
                ZE_WCSR(ZE_CSR0, reg);
        } while (ZE_RCSR(ZE_CSR0) != reg);
        splx(s);

        ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
        ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
        return 0;
}