/*	$NetBSD: am7990.c,v 1.3 1995/07/24 04:34:51 mycroft Exp $	*/

/*-
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_le.c	8.2 (Berkeley) 11/16/93
 */

#include <sys/ioctl.h>
#include <sys/errno.h>

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if defined(CCITT) && defined(LLC)
#include <sys/socketvar.h>
#include <netccitt/x25.h>
extern llc_ctlinput(), cons_rtrequest();
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef LEDEBUG
void recv_print __P((struct le_softc *, int));
void xmit_print __P((struct le_softc *, int));
#endif

void
leconfig(sc)
	struct le_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int mem;

	/* Make sure the chip is stopped. */
	lestop(sc);

	/* Initialize ifnet structure. */
	ifp->if_unit = sc->sc_dev.dv_unit;
	ifp->if_start = lestart;
	ifp->if_ioctl = leioctl;
	ifp->if_watchdog = lewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif

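	/*
	 * Scale the number of receive and transmit buffers with the size
	 * of the board memory, keeping a 4:1 receive/transmit ratio.  The
	 * counts are kept to powers of two because the init block encodes
	 * each ring length as log2 of the descriptor count (see
	 * lememinit() below).
	 */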
	switch (sc->sc_memsize) {
	case 8192:
		sc->sc_nrbuf = 4;
		sc->sc_ntbuf = 1;
		break;
	case 16384:
		sc->sc_nrbuf = 8;
		sc->sc_ntbuf = 2;
		break;
	case 32768:
		sc->sc_nrbuf = 16;
		sc->sc_ntbuf = 4;
		break;
	case 65536:
		sc->sc_nrbuf = 32;
		sc->sc_ntbuf = 8;
		break;
	default:
		panic("leconfig: weird memory size");
	}

	printf(": address %s, %d receive buffers, %d transmit buffers\n",
	    ether_sprintf(sc->sc_arpcom.ac_enaddr),
	    sc->sc_nrbuf, sc->sc_ntbuf);

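	/*
	 * Lay out the shared buffer memory as consecutive regions: the
	 * initialization block first, then the receive and transmit
	 * descriptor rings, then the receive and transmit packet buffers.
	 * Each sc_*addr value below is a byte offset from the start of
	 * that memory.
	 */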
	mem = 0;
	sc->sc_initaddr = mem;
	mem += sizeof(struct leinit);
	sc->sc_rmdaddr = mem;
	mem += sizeof(struct lermd) * sc->sc_nrbuf;
	sc->sc_tmdaddr = mem;
	mem += sizeof(struct letmd) * sc->sc_ntbuf;
	sc->sc_rbufaddr = mem;
	mem += LEBLEN * sc->sc_nrbuf;
	sc->sc_tbufaddr = mem;
	mem += LEBLEN * sc->sc_ntbuf;
#ifdef notyet
	if (mem > ...)
		panic(...);
#endif
}

void
lereset(sc)
	struct le_softc *sc;
{
	int s;

	s = splimp();
	leinit(sc);
	splx(s);
}

void
lewatchdog(unit)
	short unit;
{
	struct le_softc *sc = LE_SOFTC(unit);

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_arpcom.ac_if.if_oerrors;

	lereset(sc);
}

/*
 * Set up the initialization block and the descriptor rings.
 */
void
lememinit(sc)
	register struct le_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_long a;
	int bix;
	struct leinit init;
	struct lermd rmd;
	struct letmd tmd;

#if NBPFILTER > 0
	if (ifp->if_flags & IFF_PROMISC)
		init.init_mode = LE_MODE_NORMAL | LE_MODE_PROM;
	else
#endif
		init.init_mode = LE_MODE_NORMAL;
	init.init_padr[0] =
	    (sc->sc_arpcom.ac_enaddr[1] << 8) | sc->sc_arpcom.ac_enaddr[0];
	init.init_padr[1] =
	    (sc->sc_arpcom.ac_enaddr[3] << 8) | sc->sc_arpcom.ac_enaddr[2];
	init.init_padr[2] =
	    (sc->sc_arpcom.ac_enaddr[5] << 8) | sc->sc_arpcom.ac_enaddr[4];
	lesetladrf(&sc->sc_arpcom, init.init_ladrf);

	sc->sc_last_rd = 0;
	sc->sc_first_td = sc->sc_last_td = sc->sc_no_td = 0;

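	/*
	 * RDRA/TDRA hold the low 16 bits of each ring's address; RLEN/TLEN
	 * hold the high address bits together with log2 of the ring size
	 * in bits 13-15.  Since sc_nrbuf and sc_ntbuf are powers of two,
	 * ffs(n) - 1 yields that log2 value.
	 */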
	a = sc->sc_addr + LE_RMDADDR(sc, 0);
	init.init_rdra = a;
	init.init_rlen = (a >> 16) | ((ffs(sc->sc_nrbuf) - 1) << 13);

	a = sc->sc_addr + LE_TMDADDR(sc, 0);
	init.init_tdra = a;
	init.init_tlen = (a >> 16) | ((ffs(sc->sc_ntbuf) - 1) << 13);

	(*sc->sc_copytodesc)(sc, &init, LE_INITADDR(sc), sizeof(init));

	/*
	 * Set up receive ring descriptors.
	 */
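	/*
	 * Receive descriptors start out owned by the chip (LE_R1_OWN), with
	 * the buffer byte count stored as a two's-complement negative value
	 * with the top four "ones" bits set, as the descriptor format
	 * requires.  Transmit descriptors start out owned by the host and
	 * are handed to the chip from lestart().
	 */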
	for (bix = 0; bix < sc->sc_nrbuf; bix++) {
		a = sc->sc_addr + LE_RBUFADDR(sc, bix);
		rmd.rmd0 = a;
		rmd.rmd1_hadr = a >> 16;
		rmd.rmd1_bits = LE_R1_OWN;
		rmd.rmd2 = -LEBLEN | LE_XMD2_ONES;
		rmd.rmd3 = 0;
		(*sc->sc_copytodesc)(sc, &rmd, LE_RMDADDR(sc, bix),
		    sizeof(rmd));
	}

	/*
	 * Set up transmit ring descriptors.
	 */
	for (bix = 0; bix < sc->sc_ntbuf; bix++) {
		a = sc->sc_addr + LE_TBUFADDR(sc, bix);
		tmd.tmd0 = a;
		tmd.tmd1_hadr = a >> 16;
		tmd.tmd1_bits = 0;
		tmd.tmd2 = 0 | LE_XMD2_ONES;
		tmd.tmd3 = 0;
		(*sc->sc_copytodesc)(sc, &tmd, LE_TMDADDR(sc, bix),
		    sizeof(tmd));
	}
}

void
lestop(sc)
	struct le_softc *sc;
{

	lewrcsr(sc, LE_CSR0, LE_C0_STOP);
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
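/*
 * The sequence below follows the usual Am7990 programming model: STOP the
 * chip, load the configuration into CSR3, point CSR1/CSR2 at the low and
 * high halves of the init block address, then issue INIT and poll for IDON
 * before turning on STRT and interrupts.
 */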
void
leinit(sc)
	register struct le_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	register int timo;
	u_long a;

	lewrcsr(sc, LE_CSR0, LE_C0_STOP);
	LE_DELAY(100);

	/* Set the correct byte swapping mode, etc. */
	lewrcsr(sc, LE_CSR3, sc->sc_conf3);

	/* Set up LANCE init block. */
	lememinit(sc);

	/* Give LANCE the physical address of its init block. */
	a = sc->sc_addr + LE_INITADDR(sc);
	lewrcsr(sc, LE_CSR1, a);
	lewrcsr(sc, LE_CSR2, a >> 16);

	/* Try to initialize the LANCE. */
	LE_DELAY(100);
	lewrcsr(sc, LE_CSR0, LE_C0_INIT);

	/* Wait for initialization to finish. */
	for (timo = 100000; timo; timo--)
		if (lerdcsr(sc, LE_CSR0) & LE_C0_IDON)
			break;

	if (lerdcsr(sc, LE_CSR0) & LE_C0_IDON) {
		/* Start the LANCE. */
		lewrcsr(sc, LE_CSR0, LE_C0_INEA | LE_C0_STRT | LE_C0_IDON);
		ifp->if_flags |= IFF_RUNNING;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
		lestart(ifp);
	} else
		printf("%s: card failed to initialize\n", sc->sc_dev.dv_xname);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
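/*
 * The chain is freed as it is copied, and the frame is zero-padded out to
 * LEMINSIZE so that runts are never handed to the chip.
 */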
integrate int
leput(sc, boff, m)
	struct le_softc *sc;
	int boff;
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int len, tlen = 0;

	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		(*sc->sc_copytobuf)(sc, mtod(m, caddr_t), boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	if (tlen < LEMINSIZE) {
		(*sc->sc_zerobuf)(sc, boff, LEMINSIZE - tlen);
		tlen = LEMINSIZE;
	}
	return (tlen);
}

/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present
 * we copy into clusters.
 */
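/*
 * The first mbuf is offset by ALIGN(sizeof(struct ether_header)) -
 * sizeof(struct ether_header) so that the payload following the 14-byte
 * Ethernet header ends up aligned for the protocol headers above us.
 */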
integrate struct mbuf *
leget(sc, boff, totlen)
	struct le_softc *sc;
	int boff, totlen;
{
	register struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
	m->m_pkthdr.len = totlen;
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = 0;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return 0;
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		(*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), boff, len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Pass a packet to the higher levels.
 */
integrate void
leread(sc, boff, len)
	register struct le_softc *sc;
	int boff, len;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf *m;
	struct ether_header *eh;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = leget(sc, boff, len);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

	/* We assume that the header fit entirely in one mbuf. */
	eh = mtod(m, struct ether_header *);

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf) {
		bpf_mtap(ifp->if_bpf, m);

		/*
		 * Note that the interface cannot be in promiscuous mode if
		 * there are no BPF listeners.  And if we are in promiscuous
		 * mode, we have to check if this packet is really ours.
		 */
		if ((ifp->if_flags & IFF_PROMISC) != 0 &&
		    (eh->ether_dhost[0] & 1) == 0 && /* !mcast and !bcast */
		    bcmp(eh->ether_dhost, sc->sc_arpcom.ac_enaddr,
			sizeof(eh->ether_dhost)) != 0) {
			m_freem(m);
			return;
		}
	}
#endif

	/* Pass the packet up, with the ether header sort-of removed. */
	m_adj(m, sizeof(struct ether_header));
	ether_input(ifp, eh, m);
}

integrate void
lerint(sc)
	struct le_softc *sc;
{
	register int bix;
	int rp;
	struct lermd rmd;

	bix = sc->sc_last_rd;

	/* Process all buffers with valid data. */
	for (;;) {
		rp = LE_RMDADDR(sc, bix);
		(*sc->sc_copyfromdesc)(sc, &rmd, rp, sizeof(rmd));

		if (rmd.rmd1_bits & LE_R1_OWN)
			break;

		if (rmd.rmd1_bits & LE_R1_ERR) {
			if (rmd.rmd1_bits & LE_R1_ENP) {
				if ((rmd.rmd1_bits & LE_R1_OFLO) == 0) {
					if (rmd.rmd1_bits & LE_R1_FRAM)
						printf("%s: framing error\n",
						    sc->sc_dev.dv_xname);
					if (rmd.rmd1_bits & LE_R1_CRC)
						printf("%s: crc mismatch\n",
						    sc->sc_dev.dv_xname);
				}
			} else {
				if (rmd.rmd1_bits & LE_R1_OFLO)
					printf("%s: overflow\n",
					    sc->sc_dev.dv_xname);
			}
			if (rmd.rmd1_bits & LE_R1_BUFF)
				printf("%s: receive buffer error\n",
				    sc->sc_dev.dv_xname);
		} else if ((rmd.rmd1_bits & (LE_R1_STP | LE_R1_ENP)) !=
		    (LE_R1_STP | LE_R1_ENP)) {
			printf("%s: dropping chained buffer\n",
			    sc->sc_dev.dv_xname);
		} else {
#ifdef LEDEBUG
			if (sc->sc_debug)
				recv_print(sc, sc->sc_last_rd);
#endif
			leread(sc, LE_RBUFADDR(sc, bix), (int)rmd.rmd3 - 4);
		}

		rmd.rmd1_bits = LE_R1_OWN;
		rmd.rmd2 = -LEBLEN | LE_XMD2_ONES;
		rmd.rmd3 = 0;
		(*sc->sc_copytodesc)(sc, &rmd, rp, sizeof(rmd));

#ifdef LEDEBUG
		if (sc->sc_debug)
			printf("sc->sc_last_rd = %x, rmd1_bits = %x\n",
			    sc->sc_last_rd, rmd.rmd1_bits);
#endif

		if (++bix == sc->sc_nrbuf)
			bix = 0;
	}

	sc->sc_last_rd = bix;
}

integrate void
letint(sc)
	register struct le_softc *sc;
{
	register struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	register int bix;
	struct letmd tmd;

	bix = sc->sc_first_td;

	for (;;) {
		if (sc->sc_no_td <= 0)
			break;

		(*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, bix),
		    sizeof(tmd));

#ifdef LEDEBUG
		if (sc->sc_debug)
			printf("trans tmd1_bits = %x\n", tmd.tmd1_bits);
#endif

		if (tmd.tmd1_bits & LE_T1_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;

		if (tmd.tmd1_bits & LE_T1_ERR) {
			if (tmd.tmd3 & LE_T3_BUFF)
				printf("%s: transmit buffer error\n", sc->sc_dev.dv_xname);
			else if (tmd.tmd3 & LE_T3_UFLO)
				printf("%s: underflow\n", sc->sc_dev.dv_xname);
			if (tmd.tmd3 & (LE_T3_BUFF | LE_T3_UFLO)) {
				lereset(sc);
				return;
			}
			if (tmd.tmd3 & LE_T3_LCAR)
				printf("%s: lost carrier\n", sc->sc_dev.dv_xname);
			if (tmd.tmd3 & LE_T3_LCOL)
				ifp->if_collisions++;
			if (tmd.tmd3 & LE_T3_RTRY) {
				printf("%s: excessive collisions, tdr %d\n",
				    sc->sc_dev.dv_xname, tmd.tmd3 & LE_T3_TDR_MASK);
				ifp->if_collisions += 16;
			}
			ifp->if_oerrors++;
		} else {
			if (tmd.tmd1_bits & LE_T1_ONE)
				ifp->if_collisions++;
			else if (tmd.tmd1_bits & LE_T1_MORE)
				/* Real number is unknown. */
				ifp->if_collisions += 2;
			ifp->if_opackets++;
		}

		if (++bix == sc->sc_ntbuf)
			bix = 0;

		--sc->sc_no_td;
	}

	sc->sc_first_td = bix;

	lestart(ifp);

	if (sc->sc_no_td == 0)
		ifp->if_timer = 0;
}

/*
 * Controller interrupt.
 */
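/*
 * CSR0 completion and error bits are cleared by writing them back as ones,
 * which is why the interrupt status read below is written straight back to
 * CSR0 (together with INEA to keep interrupts enabled).
 */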
#ifdef LEINTR_UNIT
int
leintr(unit)
	int unit;
{
	register struct le_softc *sc = LE_SOFTC(unit);
#else
int
leintr(arg)
	register void *arg;
{
	register struct le_softc *sc = arg;
#endif
	register u_int16_t isr;

	isr = lerdcsr(sc, LE_CSR0);
#ifdef LEDEBUG
	if (sc->sc_debug)
		printf("%s: leintr entering with isr=%04x\n",
		    sc->sc_dev.dv_xname, isr);
#endif
	if ((isr & LE_C0_INTR) == 0)
		return (0);

	lewrcsr(sc, LE_CSR0,
	    isr & (LE_C0_INEA | LE_C0_BABL | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_RINT | LE_C0_TINT | LE_C0_IDON));
	if (isr & LE_C0_ERR) {
		if (isr & LE_C0_BABL) {
			printf("%s: babble\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_oerrors++;
		}
#if 0
		if (isr & LE_C0_CERR) {
			printf("%s: collision error\n", sc->sc_dev.dv_xname);
			sc->sc_arpcom.ac_if.if_collisions++;
		}
#endif
		if (isr & LE_C0_MISS)
			sc->sc_arpcom.ac_if.if_ierrors++;
		if (isr & LE_C0_MERR) {
			printf("%s: memory error\n", sc->sc_dev.dv_xname);
			lereset(sc);
			return (1);
		}
	}

	if ((isr & LE_C0_RXON) == 0) {
		printf("%s: receiver disabled\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_ierrors++;
		lereset(sc);
		return (1);
	}
	if ((isr & LE_C0_TXON) == 0) {
		printf("%s: transmitter disabled\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
		lereset(sc);
		return (1);
	}

	if (isr & LE_C0_RINT)
		lerint(sc);
	if (isr & LE_C0_TINT)
		letint(sc);

	return (1);
}

/*
 * Setup output on interface.
 * Get another datagram to send off of the interface queue, and map it to the
 * interface before starting the output.
 * Called only at splimp or interrupt level.
 */
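/*
 * Each outgoing frame occupies exactly one transmit descriptor, which is
 * handed to the chip with OWN, STP and ENP all set, followed by a TDMD poke
 * so the chip looks at the ring immediately.
 */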
void
lestart(ifp)
	register struct ifnet *ifp;
{
	register struct le_softc *sc = LE_SOFTC(ifp->if_unit);
	register int bix;
	register struct mbuf *m;
	struct letmd tmd;
	int rp;
	int len;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_last_td;

	for (;;) {
		rp = LE_TMDADDR(sc, bix);
		(*sc->sc_copyfromdesc)(sc, &tmd, rp, sizeof(tmd));

		if (tmd.tmd1_bits & LE_T1_OWN) {
			ifp->if_flags |= IFF_OACTIVE;
			printf("missing buffer, no_td = %d, last_td = %d\n",
			    sc->sc_no_td, sc->sc_last_td);
		}

		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the packet
		 * before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = leput(sc, LE_TBUFADDR(sc, bix), m);

#ifdef LEDEBUG
		if (len > ETHERMTU + sizeof(struct ether_header))
			printf("packet length %d\n", len);
#endif

		ifp->if_timer = 5;

		/*
		 * Init transmit registers, and set transmit start flag.
		 */
		tmd.tmd1_bits = LE_T1_OWN | LE_T1_STP | LE_T1_ENP;
		tmd.tmd2 = -len | LE_XMD2_ONES;
		tmd.tmd3 = 0;

		(*sc->sc_copytodesc)(sc, &tmd, rp, sizeof(tmd));

#ifdef LEDEBUG
		if (sc->sc_debug)
			xmit_print(sc, sc->sc_last_td);
#endif

		lewrcsr(sc, LE_CSR0, LE_C0_INEA | LE_C0_TDMD);

		if (++bix == sc->sc_ntbuf)
			bix = 0;

		if (++sc->sc_no_td == sc->sc_ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

	}

	sc->sc_last_td = bix;
}

/*
 * Process an ioctl request.
 */
int
leioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct le_softc *sc = LE_SOFTC(ifp->if_unit);
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splimp();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			leinit(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)(sc->sc_arpcom.ac_enaddr);
			else
				bcopy(ina->x_host.c_host,
				    sc->sc_arpcom.ac_enaddr,
				    sizeof(sc->sc_arpcom.ac_enaddr));
			/* Set new address. */
			leinit(sc);
			break;
		    }
#endif
		default:
			leinit(sc);
			break;
		}
		break;

#if defined(CCITT) && defined(LLC)
	case SIOCSIFCONF_X25:
		ifp->if_flags |= IFF_UP;
		ifa->ifa_rtrequest = (void (*)())cons_rtrequest; /* XXX */
		error = x25_llcglue(PRC_IFUP, ifa->ifa_addr);
		if (error == 0)
			leinit(sc);
		break;
#endif /* CCITT && LLC */

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			lestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			leinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*lestop(sc);*/
			leinit(sc);
		}
#ifdef LEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = 1;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			lereset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

#ifdef LEDEBUG
void
recv_print(sc, no)
	struct le_softc *sc;
	int no;
{
	struct lermd rmd;
	u_int16_t len;
	struct ether_header eh;

	(*sc->sc_copyfromdesc)(sc, &rmd, LE_RMDADDR(sc, no), sizeof(rmd));
	len = rmd.rmd3;
	printf("%s: receive buffer %d, len = %d\n", sc->sc_dev.dv_xname, no,
	    len);
	printf("%s: status %04x\n", sc->sc_dev.dv_xname, lerdcsr(sc, LE_CSR0));
	printf("%s: ladr %04x, hadr %02x, flags %02x, bcnt %04x, mcnt %04x\n",
	    sc->sc_dev.dv_xname,
	    rmd.rmd0, rmd.rmd1_hadr, rmd.rmd1_bits, rmd.rmd2, rmd.rmd3);
	if (len >= sizeof(eh)) {
		(*sc->sc_copyfrombuf)(sc, &eh, LE_RBUFADDR(sc, no), sizeof(eh));
		printf("%s: dst %s", sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_dhost));
		printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost),
		    ntohs(eh.ether_type));
	}
}

void
xmit_print(sc, no)
	struct le_softc *sc;
	int no;
{
	struct letmd tmd;
	u_int16_t len;
	struct ether_header eh;

	(*sc->sc_copyfromdesc)(sc, &tmd, LE_TMDADDR(sc, no), sizeof(tmd));
	len = -tmd.tmd2;
	printf("%s: transmit buffer %d, len = %d\n", sc->sc_dev.dv_xname, no,
	    len);
	printf("%s: status %04x\n", sc->sc_dev.dv_xname, lerdcsr(sc, LE_CSR0));
	printf("%s: ladr %04x, hadr %02x, flags %02x, bcnt %04x, mcnt %04x\n",
	    sc->sc_dev.dv_xname,
	    tmd.tmd0, tmd.tmd1_hadr, tmd.tmd1_bits, tmd.tmd2, tmd.tmd3);
	if (len >= sizeof(eh)) {
		(*sc->sc_copyfrombuf)(sc, &eh, LE_TBUFADDR(sc, no), sizeof(eh));
		printf("%s: dst %s", sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_dhost));
		printf(" src %s type %04x\n", ether_sprintf(eh.ether_shost),
		    ntohs(eh.ether_type));
	}
}
#endif /* LEDEBUG */

/*
 * Set up the logical address filter.
 */
void
lesetladrf(ac, af)
	struct arpcom *ac;
	u_int16_t *af;
{
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	register u_char *cp, c;
	register u_int32_t crc;
	register int i, len;
	struct ether_multistep step;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The two high order
	 * bits select the 16-bit word, while the low order four bits select
	 * the bit within the word.
	 */
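	/*
	 * The loop below is a bit-serial, LSB-first CRC-32 (reflected
	 * polynomial 0xedb88320, initial value 0xffffffff), matching the
	 * CRC the LANCE computes over the destination address when
	 * filtering in hardware.
	 */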

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	af[0] = af[1] = af[2] = af[3] = 0x0000;
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
		    sizeof(enm->enm_addrlo)) != 0) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			c = *cp++;
			for (i = 8; --i >= 0;) {
				if ((crc & 0x01) ^ (c & 0x01)) {
					crc >>= 1;
					crc ^= 0xedb88320;
				} else
					crc >>= 1;
				c >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		af[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	af[0] = af[1] = af[2] = af[3] = 0xffff;
}


#if 0	/* USE OF THE FOLLOWING IS MACHINE-SPECIFIC */
/*
 * Routines for accessing the transmit and receive buffers.  Unfortunately,
 * CPU addressing of these buffers is done in one of 3 ways:
 * - contiguous (for the 3max and turbochannel option card)
 * - gap2, which means shorts (2 bytes) interspersed with short (2 byte)
 *   spaces (for the pmax)
 * - gap16, which means 16bytes interspersed with 16byte spaces
 *   for buffers which must begin on a 32byte boundary (for 3min and maxine)
 * The buffer offset is the logical byte offset, assuming contiguous storage.
 */
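/*
 * Illustrative examples (not from the original sources): under gap2,
 * logical byte offset 5 lives in the third data short, i.e. at u_short
 * index 4 from the start of the buffer (byte address 8, high byte); under
 * gap16, logical byte offset 21 lives in the second 16-byte block, at byte
 * address 32 + 5.
 */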
void
copytodesc_contig(sc, from, boff, len)
	struct le_softc *sc;
	caddr_t from;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(from, buf + boff, len);
}

void
copyfromdesc_contig(sc, to, boff, len)
	struct le_softc *sc;
	caddr_t to;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(buf + boff, to, len);
}

void
copytobuf_contig(sc, from, boff, len)
	struct le_softc *sc;
	caddr_t from;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(from, buf + boff, len);
}

void
copyfrombuf_contig(sc, to, boff, len)
	struct le_softc *sc;
	caddr_t to;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(buf + boff, to, len);
}

void
zerobuf_contig(sc, boff, len)
	struct le_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;

	/*
	 * Just let bzero() do the work.
	 */
	bzero(buf + boff, len);
}

/*
 * For the pmax the buffer consists of shorts (2 bytes) interspersed with
 * short (2 byte) spaces and must be accessed with halfword load/stores.
 * (don't worry about doing an extra byte)
 */
void
copytobuf_gap2(sc, from, boff, len)
	struct le_softc *sc;
	register caddr_t from;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register volatile u_short *bptr;
	register int xfer;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_short *)buf) + (boff - 1);
		*bptr = (*from++ << 8) | (*bptr & 0xff);
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_short *)buf) + boff;
	if ((unsigned)from & 0x1) {
		while (len > 1) {
			*bptr = (from[1] << 8) | (from[0] & 0xff);
			bptr += 2;
			from += 2;
			len -= 2;
		}
	} else {
		/* optimize for aligned transfers */
		xfer = (int)((unsigned)len & ~0x1);
		CopyToBuffer((u_short *)from, bptr, xfer);
		bptr += xfer;
		from += xfer;
		len -= xfer;
	}
	if (len == 1)
		*bptr = (u_short)*from;
}

void
copyfrombuf_gap2(sc, to, boff, len)
	struct le_softc *sc;
	register caddr_t to;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register volatile u_short *bptr;
	register u_short tmp;
	register int xfer;

	if (boff & 0x1) {
		/* handle unaligned first byte */
		bptr = ((volatile u_short *)buf) + (boff - 1);
		*to++ = (*bptr >> 8) & 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_short *)buf) + boff;
	if ((unsigned)to & 0x1) {
		while (len > 1) {
			tmp = *bptr;
			*to++ = tmp & 0xff;
			*to++ = (tmp >> 8) & 0xff;
			bptr += 2;
			len -= 2;
		}
	} else {
		/* optimize for aligned transfers */
		xfer = (int)((unsigned)len & ~0x1);
		CopyFromBuffer(bptr, to, xfer);
		bptr += xfer;
		to += xfer;
		len -= xfer;
	}
	if (len == 1)
		*to = *bptr & 0xff;
}

void
zerobuf_gap2(sc, boff, len)
	struct le_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register volatile u_short *bptr;

	if ((unsigned)boff & 0x1) {
		bptr = ((volatile u_short *)buf) + (boff - 1);
		*bptr &= 0xff;
		bptr += 2;
		len--;
	} else
		bptr = ((volatile u_short *)buf) + boff;
	while (len > 0) {
		*bptr = 0;
		bptr += 2;
		len -= 2;
	}
}

/*
 * For the 3min and maxine, the buffers are in main memory filled in with
 * 16byte blocks interspersed with 16byte spaces.
 */
void
copytobuf_gap16(sc, from, boff, len)
	struct le_softc *sc;
	register caddr_t from;
	int boff;
	register int len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bcopy(from, bptr + boff, xfer);
		from += xfer;
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

void
copyfrombuf_gap16(sc, to, boff, len)
	struct le_softc *sc;
	register caddr_t to;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bcopy(bptr + boff, to, xfer);
		to += xfer;
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}

void
zerobuf_gap16(sc, boff, len)
	struct le_softc *sc;
	int boff, len;
{
	volatile caddr_t buf = sc->sc_mem;
	register caddr_t bptr;
	register int xfer;

	bptr = buf + ((boff << 1) & ~0x1f);
	boff &= 0xf;
	xfer = min(len, 16 - boff);
	while (len > 0) {
		bzero(bptr + boff, xfer);
		bptr += 32;
		boff = 0;
		len -= xfer;
		xfer = min(len, 16);
	}
}
#endif