/*	$NetBSD: if_ie.c,v 1.12 1996/05/09 21:15:47 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1993, 1994, 1995 Charles Hannum.
5 * Copyright (c) 1992, 1993, University of Vermont and State
6 * Agricultural College.
7 * Copyright (c) 1992, 1993, Garrett A. Wollman.
8 *
9 * Portions:
10 * Copyright (c) 1994, 1995, Rafal K. Boni
11 * Copyright (c) 1990, 1991, William F. Jolitz
12 * Copyright (c) 1990, The Regents of the University of California
13 *
14 * All rights reserved.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. All advertising materials mentioning features or use of this software
25 * must display the following acknowledgement:
26 * This product includes software developed by Charles Hannum, by the
27 * University of Vermont and State Agricultural College and Garrett A.
28 * Wollman, by William F. Jolitz, and by the University of California,
29 * Berkeley, Lawrence Berkeley Laboratory, and its contributors.
30 * 4. Neither the names of the Universities nor the names of the authors
31 * may be used to endorse or promote products derived from this software
32 * without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
35 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
36 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
37 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
38 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
39 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
40 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
41 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
42 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
43 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
44 * SUCH DAMAGE.
45 */
46
47 /*
48 * Intel 82586 Ethernet chip
49 * Register, bit, and structure definitions.
50 *
51 * Original StarLAN driver written by Garrett Wollman with reference to the
52 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
53 *
54 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
55 *
56 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
57 *
58 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
59 *
60 * Converted to SUN ie driver by Charles D. Cranor,
61 * October 1994, January 1995.
62 * This sun version based on i386 version 1.30.
63 */
64
65 /*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME-based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
72 */
73
74 /*
75 Mode of operation:
76
77 We run the 82586 in a standard Ethernet mode. We keep NFRAMES
78 received frame descriptors around for the receiver to use, and
79 NRXBUF associated receive buffer descriptors, both in a circular
80 list. Whenever a frame is received, we rotate both lists as
81 necessary. (The 586 treats both lists as a simple queue.) We also
82 keep a transmit command around so that packets can be sent off
83 quickly.
84
85 We configure the adapter in AL-LOC = 1 mode, which means that the
86 Ethernet/802.3 MAC header is placed at the beginning of the receive
87 buffer rather than being split off into various fields in the RFD.
88 This also means that we must include this header in the transmit
89 buffer as well.
90
91 By convention, all transmit commands, and only transmit commands,
92 shall have the I (IE_CMD_INTR) bit set in the command. This way,
93 when an interrupt arrives at ieintr(), it is immediately possible
94 to tell what precisely caused it. ANY OTHER command-sending
95 routines should run at splnet(), and should post an acknowledgement
96 to every interrupt they generate.
97 */
98
99 #include "bpfilter.h"
100
101 #include <sys/param.h>
102 #include <sys/systm.h>
103 #include <sys/mbuf.h>
104 #include <sys/buf.h>
105 #include <sys/protosw.h>
106 #include <sys/socket.h>
107 #include <sys/ioctl.h>
108 #include <sys/errno.h>
109 #include <sys/syslog.h>
110 #include <sys/device.h>
111
112 #include <net/if.h>
113 #include <net/if_types.h>
114 #include <net/if_dl.h>
115 #include <net/netisr.h>
116 #include <net/route.h>
117
118 #if NBPFILTER > 0
119 #include <net/bpf.h>
120 #include <net/bpfdesc.h>
121 #endif
122
123 #ifdef INET
124 #include <netinet/in.h>
125 #include <netinet/in_systm.h>
126 #include <netinet/in_var.h>
127 #include <netinet/ip.h>
128 #include <netinet/if_ether.h>
129 #endif
130
131 #ifdef NS
132 #include <netns/ns.h>
133 #include <netns/ns_if.h>
134 #endif
135
136 #include <vm/vm.h>
137
138 /*
139 * ugly byte-order hack for SUNs
140 */
141
142 #define SWAP(x) ((u_short)(XSWAP((u_short)(x))))
143 #define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )
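/*
 * The 82586's 16-bit fields are little-endian (Intel order) while the
 * m68k/SPARC hosts are big-endian; SWAP() converts between the two,
 * e.g. SWAP(0x1234) == 0x3412.
 */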
144
145 #include <machine/autoconf.h>
146 #include <machine/cpu.h>
147 #include <machine/pmap.h>
148
149 #include "i82586.h"
150 #include "if_iereg.h"
151 #include "if_ievar.h"
152
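/*
 * A packet accepted only so that BPF could see it is parked here by
 * ie_readframe() and freed when the next frame comes in.
 */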
153 static struct mbuf *last_not_for_us;
154
155 /*
156 * IED: ie debug flags
157 */
158
159 #define IED_RINT 0x01
160 #define IED_TINT 0x02
161 #define IED_RNR 0x04
162 #define IED_CNA 0x08
163 #define IED_READFRAME 0x10
164 #define IED_ALL 0x1f
165
166 #define ETHER_MIN_LEN 64
167 #define ETHER_MAX_LEN 1518
168 #define ETHER_ADDR_LEN 6
169
170 void iewatchdog __P((struct ifnet *));
171 int ieinit __P((struct ie_softc *));
172 int ieioctl __P((struct ifnet *, u_long, caddr_t));
173 void iestart __P((struct ifnet *));
174 void iereset __P((struct ie_softc *));
175 static void ie_readframe __P((struct ie_softc *, int));
176 static void ie_drop_packet_buffer __P((struct ie_softc *));
177 static int command_and_wait __P((struct ie_softc *, int,
178 void volatile *, int));
179 static void ierint __P((struct ie_softc *));
180 static void ietint __P((struct ie_softc *));
181 static void setup_bufs __P((struct ie_softc *));
182 static int mc_setup __P((struct ie_softc *, void *));
183 static void mc_reset __P((struct ie_softc *));
184
185 #ifdef IEDEBUG
186 void print_rbd __P((volatile struct ie_recv_buf_desc *));
187 int in_ierint = 0;
188 int in_ietint = 0;
189 #endif
190
191
192 struct cfdriver ie_cd = {
193 NULL, "ie", DV_IFNET
194 };
195
196
197 /*
198 * address generation macros
199 * MK_24 = KVA -> 24 bit address in SUN byte order
200 * MK_16 = KVA -> 16 bit address in INTEL byte order
 *  ST_24 = store a 24 bit address (in SUN byte order) into `to' in INTEL byte order
202 */
203 #define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
204 #define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
205 #define ST_24(to, from) { \
206 u_long fval = (u_long)(from); \
207 u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
208 t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0];*/ \
209 }
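/*
 * Example usage, taken from ie_setupram() and setup_bufs() below:
 *	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
 *	ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
 * MK_16 yields a byte-swapped 16-bit offset from the start of shared
 * memory; MK_24/ST_24 store a 24-bit offset from sc_iobase with the
 * low byte first, as the chip expects.
 */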
210
211 /*
212 * Here are a few useful functions. We could have done these as macros,
213 * but since we have the inline facility, it makes sense to use that
214 * instead.
215 */
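/*
 * ie_setup_config: fill in an 82586 CONFIGURE command with this
 * driver's standard parameters, optionally enabling promiscuous
 * reception and/or Manchester encoding.  All of the fields are single
 * bytes, so no byte-swapping is needed.
 */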
216 static inline void
217 ie_setup_config(cmd, promiscuous, manchester)
218 volatile struct ie_config_cmd *cmd;
219 int promiscuous, manchester;
220 {
221
222 /*
223 * these are all char's so no need to byte-swap
224 */
225 cmd->ie_config_count = 0x0c;
226 cmd->ie_fifo = 8;
227 cmd->ie_save_bad = 0x40;
228 cmd->ie_addr_len = 0x2e;
229 cmd->ie_priority = 0;
230 cmd->ie_ifs = 0x60;
231 cmd->ie_slot_low = 0;
232 cmd->ie_slot_high = 0xf2;
233 cmd->ie_promisc = !!promiscuous | manchester << 2;
234 cmd->ie_crs_cdt = 0;
235 cmd->ie_min_len = 64;
236 cmd->ie_junk = 0xff;
237 }
238
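/* Align: round a pointer up to the next 4-byte boundary. */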
239 static inline caddr_t
240 Align(ptr)
241 caddr_t ptr;
242 {
243 u_long l = (u_long)ptr;
244
245 l = (l + 3) & ~3L;
246 return (caddr_t)l;
247 }
248
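/*
 * ie_ack: acknowledge the status bits selected by `mask' by writing
 * them back to the SCB command word and waiting for the chip to
 * accept the command.
 */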
249 static inline void
250 ie_ack(sc, mask)
251 struct ie_softc *sc;
252 u_int mask;
253 {
254 volatile struct ie_sys_ctl_block *scb = sc->scb;
255
256 command_and_wait(sc, scb->ie_status & mask, 0, 0);
257 }
258
259
260 /*
261 * Taken almost exactly from Bill's if_is.c,
262 * then modified beyond recognition...
263 */
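/*
 * ie_attach: carve the ISCP and SCB out of the top of the board's
 * buffer area, initialize the chip's shared-memory structures, and
 * attach the network interface.  The MD front-end must have filled in
 * sc->buf_area, sc->buf_area_sz, sc->sc_addr, and the access/reset
 * function pointers before calling this.
 */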
264 void
265 ie_attach(sc)
266 struct ie_softc *sc;
267 {
268 struct ifnet *ifp = &sc->sc_if;
269 int off;
270
271 /* MD code has done its part before calling this. */
272 printf(" hwaddr %s\n", ether_sprintf(sc->sc_addr));
273
274 /* Allocate from end of buffer space for ISCP, SCB */
275 off = sc->buf_area_sz;
276 off &= ~3;
277
278 /* Space for ISCP */
279 off -= sizeof(*sc->iscp);
280 sc->iscp = (volatile void *) (sc->buf_area + off);
281
282 /* Space for SCB */
283 off -= sizeof(*sc->scb);
284 sc->scb = (volatile void *) (sc->buf_area + off);
285
286 /* Remainder is for buffers, etc. */
287 sc->buf_area_sz = off;
288
289 /*
290 * Setup RAM for transmit/receive
291 */
292 if (ie_setupram(sc) == 0) {
293 printf(": RAM CONFIG FAILED!\n");
294 /* XXX should reclaim resources? */
295 return;
296 }
297
298 /*
299 * Initialize and attach S/W interface
300 */
301 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
302 ifp->if_softc = sc;
303 ifp->if_start = iestart;
304 ifp->if_ioctl = ieioctl;
305 ifp->if_watchdog = iewatchdog;
306 ifp->if_flags =
307 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
308
309 /* Attach the interface. */
310 if_attach(ifp);
311 ether_ifattach(ifp);
312 #if NBPFILTER > 0
313 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
314 #endif
315 }
316
317 /*
318 * Device timeout/watchdog routine. Entered if the device neglects to
319 * generate an interrupt after a transmit has been started on it.
320 */
321 void
322 iewatchdog(ifp)
323 struct ifnet *ifp;
324 {
325 struct ie_softc *sc = ifp->if_softc;
326
327 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
328 ++sc->sc_arpcom.ac_if.if_oerrors;
329
330 iereset(sc);
331 }
332
333 /*
334 * What to do upon receipt of an interrupt.
335 */
336 int
337 ie_intr(v)
338 void *v;
339 {
340 struct ie_softc *sc = v;
341 register u_short status;
342
343 status = sc->scb->ie_status;
344
345 /*
346 * check for parity error
347 */
348 if (sc->hard_type == IE_VME) {
349 volatile struct ievme *iev = (volatile struct ievme *)sc->sc_reg;
350 if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl %x @ %02x%04x)\n",
			       sc->sc_dev.dv_xname, iev->pectrl,
			       iev->pectrl & IEVME_HADDR, iev->peaddr);
354 iev->pectrl = iev->pectrl | IEVME_PARACK;
355 }
356 }
357
358 loop:
359 /* Ack interrupts FIRST in case we receive more during the ISR. */
360 ie_ack(sc, IE_ST_WHENCE & status);
361
362 if (status & (IE_ST_RECV | IE_ST_RNR)) {
363 #ifdef IEDEBUG
364 in_ierint++;
365 if (sc->sc_debug & IED_RINT)
366 printf("%s: rint\n", sc->sc_dev.dv_xname);
367 #endif
368 ierint(sc);
369 #ifdef IEDEBUG
370 in_ierint--;
371 #endif
372 }
373
374 if (status & IE_ST_DONE) {
375 #ifdef IEDEBUG
376 in_ietint++;
377 if (sc->sc_debug & IED_TINT)
378 printf("%s: tint\n", sc->sc_dev.dv_xname);
379 #endif
380 ietint(sc);
381 #ifdef IEDEBUG
382 in_ietint--;
383 #endif
384 }
385
386 if (status & IE_ST_RNR) {
387 printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
388 sc->sc_arpcom.ac_if.if_ierrors++;
389 iereset(sc);
390 }
391
392 #ifdef IEDEBUG
393 if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
394 printf("%s: cna\n", sc->sc_dev.dv_xname);
395 #endif
396
397 if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
398 goto loop;
399
400 return 1;
401 }
402
403 /*
404 * Process a received-frame interrupt.
405 */
406 void
407 ierint(sc)
408 struct ie_softc *sc;
409 {
410 volatile struct ie_sys_ctl_block *scb = sc->scb;
411 int i, status;
412 static int timesthru = 1024;
413
414 i = sc->rfhead;
415 for (;;) {
416 status = sc->rframes[i]->ie_fd_status;
417
418 if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
419 sc->sc_arpcom.ac_if.if_ipackets++;
420 if (!--timesthru) {
421 sc->sc_arpcom.ac_if.if_ierrors +=
422 SWAP(scb->ie_err_crc) +
423 SWAP(scb->ie_err_align) +
424 SWAP(scb->ie_err_resource) +
425 SWAP(scb->ie_err_overrun);
426 scb->ie_err_crc = 0;
427 scb->ie_err_align = 0;
428 scb->ie_err_resource = 0;
429 scb->ie_err_overrun = 0;
430 timesthru = 1024;
431 }
432 ie_readframe(sc, i);
433 } else {
434 if ((status & IE_FD_RNR) != 0 &&
435 (scb->ie_status & IE_RU_READY) == 0) {
436 sc->rframes[0]->ie_fd_buf_desc =
437 MK_16(sc->sc_maddr, sc->rbuffs[0]);
438 scb->ie_recv_list =
439 MK_16(sc->sc_maddr, sc->rframes[0]);
440 command_and_wait(sc, IE_RU_START, 0, 0);
441 }
442 break;
443 }
444 i = (i + 1) % sc->nframes;
445 }
446 }
447
448 /*
449 * Process a command-complete interrupt. These are only generated by
450 * the transmission of frames. This routine is deceptively simple, since
451 * most of the real work is done by iestart().
452 */
453 void
454 ietint(sc)
455 struct ie_softc *sc;
456 {
457 int status;
458 int i;
459
460 sc->sc_arpcom.ac_if.if_timer = 0;
461 sc->sc_arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
462
463 status = sc->xmit_cmds[sc->xctail]->ie_xmit_status;
464
465 if (!(status & IE_STAT_COMPL) || (status & IE_STAT_BUSY))
466 printf("ietint: command still busy!\n");
467
468 if (status & IE_STAT_OK) {
469 sc->sc_arpcom.ac_if.if_opackets++;
470 sc->sc_arpcom.ac_if.if_collisions +=
471 SWAP(status & IE_XS_MAXCOLL);
472 } else if (status & IE_STAT_ABORT) {
473 printf("%s: send aborted\n", sc->sc_dev.dv_xname);
474 sc->sc_arpcom.ac_if.if_oerrors++;
475 } else if (status & IE_XS_NOCARRIER) {
476 printf("%s: no carrier\n", sc->sc_dev.dv_xname);
477 sc->sc_arpcom.ac_if.if_oerrors++;
478 } else if (status & IE_XS_LOSTCTS) {
479 printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
480 sc->sc_arpcom.ac_if.if_oerrors++;
481 } else if (status & IE_XS_UNDERRUN) {
482 printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
483 sc->sc_arpcom.ac_if.if_oerrors++;
484 } else if (status & IE_XS_EXCMAX) {
485 printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
486 sc->sc_arpcom.ac_if.if_collisions += 16;
487 sc->sc_arpcom.ac_if.if_oerrors++;
488 }
489
490 /*
491 * If multicast addresses were added or deleted while we
492 * were transmitting, mc_reset() set the want_mcsetup flag
493 * indicating that we should do it.
494 */
495 if (sc->want_mcsetup) {
496 mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
497 sc->want_mcsetup = 0;
498 }
499
500 /* Done with the buffer. */
501 sc->xmit_free++;
502 sc->xmit_busy = 0;
503 sc->xctail = (sc->xctail + 1) % NTXBUF;
504
505 iestart(&sc->sc_arpcom.ac_if);
506 }
507
508 /*
509 * Compare two Ether/802 addresses for equality, inlined and
510 * unrolled for speed. I'd love to have an inline assembler
511 * version of this... XXX: Who wanted that? mycroft?
512 * I wrote one, but the following is just as efficient.
513 * This expands to 10 short m68k instructions! -gwr
514 * Note: use this like bcmp()
515 */
516 static inline u_short
517 ether_cmp(one, two)
518 u_char *one, *two;
519 {
520 register u_short *a = (u_short *) one;
521 register u_short *b = (u_short *) two;
522 register u_short diff;
523
524 diff = *a++ - *b++;
525 diff |= *a++ - *b++;
526 diff |= *a++ - *b++;
527
528 return (diff);
529 }
530 #define ether_equal !ether_cmp
531
532 /*
533 * Check for a valid address. to_bpf is filled in with one of the following:
534 * 0 -> BPF doesn't get this packet
535 * 1 -> BPF does get this packet
536 * 2 -> BPF does get this packet, but we don't
537 * Return value is true if the packet is for us, and false otherwise.
538 *
539 * This routine is a mess, but it's also critical that it be as fast
540 * as possible. It could be made cleaner if we can assume that the
541 * only client which will fiddle with IFF_PROMISC is BPF. This is
542 * probably a good assumption, but we do not make it here. (Yet.)
543 */
544 static inline int
545 check_eh(sc, eh, to_bpf)
546 struct ie_softc *sc;
547 struct ether_header *eh;
548 int *to_bpf;
549 {
550 int i;
551
552 switch (sc->promisc) {
553 case IFF_ALLMULTI:
554 /*
555 * Receiving all multicasts, but no unicasts except those
556 * destined for us.
557 */
558 #if NBPFILTER > 0
559 /* BPF gets this packet if anybody cares */
560 *to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
561 #endif
562 if (eh->ether_dhost[0] & 1)
563 return 1;
564 if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
565 return 1;
566 return 0;
567
568 case IFF_PROMISC:
569 /*
570 * Receiving all packets. These need to be passed on to BPF.
571 */
572 #if NBPFILTER > 0
573 *to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
574 #endif
575 /* If for us, accept and hand up to BPF */
576 if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
577 return 1;
578
579 #if NBPFILTER > 0
580 if (*to_bpf)
581 *to_bpf = 2; /* we don't need to see it */
582 #endif
583
584 /*
585 * Not a multicast, so BPF wants to see it but we don't.
586 */
587 if (!(eh->ether_dhost[0] & 1))
588 return 1;
589
590 /*
591 * If it's one of our multicast groups, accept it and pass it
592 * up.
593 */
594 for (i = 0; i < sc->mcast_count; i++) {
595 if (ether_equal(eh->ether_dhost,
596 (u_char *)&sc->mcast_addrs[i])) {
597 #if NBPFILTER > 0
598 if (*to_bpf)
599 *to_bpf = 1;
600 #endif
601 return 1;
602 }
603 }
604 return 1;
605
606 case IFF_ALLMULTI | IFF_PROMISC:
607 /*
608 * Acting as a multicast router, and BPF running at the same
609 * time. Whew! (Hope this is a fast machine...)
610 */
611 #if NBPFILTER > 0
612 *to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
613 #endif
614 /* We want to see multicasts. */
615 if (eh->ether_dhost[0] & 1)
616 return 1;
617
618 /* We want to see our own packets */
619 if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
620 return 1;
621
622 /* Anything else goes to BPF but nothing else. */
623 #if NBPFILTER > 0
624 if (*to_bpf)
625 *to_bpf = 2;
626 #endif
627 return 1;
628
629 default:
630 /*
631 * Only accept unicast packets destined for us, or multicasts
632 * for groups that we belong to. For now, we assume that the
633 * '586 will only return packets that we asked it for. This
634 * isn't strictly true (it uses hashing for the multicast filter),
635 * but it will do in this case, and we want to get out of here
636 * as quickly as possible.
637 */
638 #if NBPFILTER > 0
639 *to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
640 #endif
641 return 1;
642 }
643 return 0;
644 }
645
646 /*
647 * We want to isolate the bits that have meaning... This assumes that
648 * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds
649 * the size of the buffer, then we are screwed anyway.
650 */
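/*
 * (The mask IE_RBUF_SIZE | (IE_RBUF_SIZE - 1) is used rather than just
 * IE_RBUF_SIZE - 1 so that a completely full buffer, whose actual count
 * equals IE_RBUF_SIZE, is reported correctly.)
 */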
651 static inline int
652 ie_buflen(sc, head)
653 struct ie_softc *sc;
654 int head;
655 {
656
657 return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
658 & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
659 }
660
661 static inline int
662 ie_packet_len(sc)
663 struct ie_softc *sc;
664 {
665 int i;
666 int head = sc->rbhead;
667 int acc = 0;
668
669 do {
		if (!(sc->rbuffs[head]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[head]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, head);
676 iereset(sc);
677 return -1;
678 }
679
680 i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;
681
682 acc += ie_buflen(sc, head);
683 head = (head + 1) % sc->nrxbuf;
684 } while (!i);
685
686 return acc;
687 }
688
689 /*
690 * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
691 * command to the chip to be executed. On the way, if we have a BPF listener
692 * also give him a copy.
693 */
694 inline static void
695 iexmit(sc)
696 struct ie_softc *sc;
697 {
698
699 #if NBPFILTER > 0
700 /*
701 * If BPF is listening on this interface, let it see the packet before
702 * we push it on the wire.
703 */
704 if (sc->sc_arpcom.ac_if.if_bpf)
705 bpf_tap(sc->sc_arpcom.ac_if.if_bpf,
706 sc->xmit_cbuffs[sc->xctail],
707 SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags));
708 #endif
709
710 sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= IE_XMIT_LAST;
711 sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
712 ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
713 MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));
714
715 sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
716 sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
717 IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST;
718
719 sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
720 sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
721 MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);
722
723 sc->scb->ie_command_list =
724 MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
725 command_and_wait(sc, IE_CU_START, 0, 0);
726
727 sc->xmit_busy = 1;
728 sc->sc_arpcom.ac_if.if_timer = 5;
729 }
730
731 /*
732 * Read data off the interface, and turn it into an mbuf chain.
733 *
734 * This code is DRAMATICALLY different from the previous version; this
735 * version tries to allocate the entire mbuf chain up front, given the
736 * length of the data available. This enables us to allocate mbuf
737 * clusters in many situations where before we would have had a long
738 * chain of partially-full mbufs. This should help to speed up the
739 * operation considerably. (Provided that it works, of course.)
740 */
741 static inline int
742 ieget(sc, mp, ehp, to_bpf)
743 struct ie_softc *sc;
744 struct mbuf **mp;
745 struct ether_header *ehp;
746 int *to_bpf;
747 {
748 struct mbuf *m, *top, **mymp;
749 int i;
750 int offset;
751 int totlen, resid;
752 int thismboff;
753 int head;
754
755 totlen = ie_packet_len(sc);
756 if (totlen <= 0)
757 return -1;
758
759 i = sc->rbhead;
760
761 /*
762 * Snarf the Ethernet header.
763 */
764 (sc->sc_bcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);
765
766 /*
767 * As quickly as possible, check if this packet is for us.
768 * If not, don't waste a single cycle copying the rest of the
769 * packet in.
770 * This is only a consideration when FILTER is defined; i.e., when
771 * we are either running BPF or doing multicasting.
772 */
773 if (!check_eh(sc, ehp, to_bpf)) {
774 ie_drop_packet_buffer(sc);
775 /* just this case, it's not an error */
776 sc->sc_arpcom.ac_if.if_ierrors--;
777 return -1;
778 }
779 totlen -= (offset = sizeof *ehp);
780
781 MGETHDR(*mp, M_DONTWAIT, MT_DATA);
782 if (!*mp) {
783 ie_drop_packet_buffer(sc);
784 return -1;
785 }
786
787 m = *mp;
788 m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
789 m->m_len = MHLEN;
790 resid = m->m_pkthdr.len = totlen;
791 top = 0;
792 mymp = ⊤
793
794 /*
795 * This loop goes through and allocates mbufs for all the data we will
796 * be copying in. It does not actually do the copying yet.
797 */
798 do { /* while (resid > 0) */
799 /*
800 * Try to allocate an mbuf to hold the data that we have. If
801 * we already allocated one, just get another one and stick it
802 * on the end (eventually). If we don't already have one, try
803 * to allocate an mbuf cluster big enough to hold the whole
804 * packet, if we think it's reasonable, or a single mbuf which
805 * may or may not be big enough. Got that?
806 */
807 if (top) {
808 MGET(m, M_DONTWAIT, MT_DATA);
809 if (!m) {
810 m_freem(top);
811 ie_drop_packet_buffer(sc);
812 return -1;
813 }
814 m->m_len = MLEN;
815 }
816
817 if (resid >= MINCLSIZE) {
818 MCLGET(m, M_DONTWAIT);
819 if (m->m_flags & M_EXT)
820 m->m_len = min(resid, MCLBYTES);
821 } else {
822 if (resid < m->m_len) {
823 if (!top && resid + max_linkhdr <= m->m_len)
824 m->m_data += max_linkhdr;
825 m->m_len = resid;
826 }
827 }
828 resid -= m->m_len;
829 *mymp = m;
830 mymp = &m->m_next;
831 } while (resid > 0);
832
833 resid = totlen;
834 m = top;
835 thismboff = 0;
836 head = sc->rbhead;
837
838 /*
839 * Now we take the mbuf chain (hopefully only one mbuf most of the
840 * time) and stuff the data into it. There are no possible failures
841 * at or after this point.
842 */
843 while (resid > 0) { /* while there's stuff left */
844 int thislen = ie_buflen(sc, head) - offset;
845
846 /*
847 * If too much data for the current mbuf, then fill the current one
848 * up, go to the next one, and try again.
849 */
850 if (thislen > m->m_len - thismboff) {
851 int newlen = m->m_len - thismboff;
852 (sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
853 mtod(m, caddr_t) + thismboff, (u_int)newlen);
854 m = m->m_next;
855 thismboff = 0; /* new mbuf, so no offset */
856 offset += newlen; /* we are now this far into
857 * the packet */
858 resid -= newlen; /* so there is this much left
859 * to get */
860 continue;
861 }
862
863 /*
864 * If there is more than enough space in the mbuf to hold the
865 * contents of this buffer, copy everything in, advance pointers,
866 * and so on.
867 */
868 if (thislen < m->m_len - thismboff) {
869 (sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
870 mtod(m, caddr_t) + thismboff, (u_int)thislen);
871 thismboff += thislen; /* we are this far into the
872 * mbuf */
873 resid -= thislen; /* and this much is left */
874 goto nextbuf;
875 }
876
877 /*
878 * Otherwise, there is exactly enough space to put this buffer's
879 * contents into the current mbuf. Do the combination of the above
880 * actions.
881 */
882 (sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
883 mtod(m, caddr_t) + thismboff, (u_int)thislen);
884 m = m->m_next;
885 thismboff = 0; /* new mbuf, start at the beginning */
886 resid -= thislen; /* and we are this far through */
887
888 /*
889 * Advance all the pointers. We can get here from either of the
890 * last two cases, but never the first.
891 */
892 nextbuf:
893 offset = 0;
894 sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
895 sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
896 sc->rbhead = head = (head + 1) % sc->nrxbuf;
897 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
898 sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
899 }
900
901 /*
902 * Unless something changed strangely while we were doing the copy,
903 * we have now copied everything in from the shared memory.
904 * This means that we are done.
905 */
906 return 0;
907 }
908
909 /*
 * Read frame NUM from the receive frame list.
911 *
912 * This routine reads the RFD at NUM, and copies in the buffers from
913 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
914 * doesn't start complaining. Trailers are DROPPED---there's no point
915 * in wasting time on confusing code to deal with them. Hopefully,
916 * this machine will never ARP for trailers anyway.
917 */
918 static void
919 ie_readframe(sc, num)
920 struct ie_softc *sc;
921 int num; /* frame number to read */
922 {
923 int status;
924 struct mbuf *m = 0;
925 struct ether_header eh;
926 #if NBPFILTER > 0
927 int bpf_gets_it = 0;
928 #endif
929
930 status = sc->rframes[num]->ie_fd_status;
931
932 /* Immediately advance the RFD list, since we have copied ours now. */
933 sc->rframes[num]->ie_fd_status = SWAP(0);
934 sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
935 sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
936 sc->rftail = (sc->rftail + 1) % sc->nframes;
937 sc->rfhead = (sc->rfhead + 1) % sc->nframes;
938
939 if (status & IE_FD_OK) {
940 #if NBPFILTER > 0
941 if (ieget(sc, &m, &eh, &bpf_gets_it)) {
942 #else
943 if (ieget(sc, &m, &eh, 0)) {
944 #endif
945 sc->sc_arpcom.ac_if.if_ierrors++;
946 return;
947 }
948 }
949
950 #ifdef IEDEBUG
951 if (sc->sc_debug & IED_READFRAME)
952 printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
953 ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
954 #endif
955
956 if (!m)
957 return;
958
959 if (last_not_for_us) {
960 m_freem(last_not_for_us);
961 last_not_for_us = 0;
962 }
963
964 #if NBPFILTER > 0
965 /*
966 * Check for a BPF filter; if so, hand it up.
967 * Note that we have to stick an extra mbuf up front, because
968 * bpf_mtap expects to have the ether header at the front.
969 * It doesn't matter that this results in an ill-formatted mbuf chain,
970 * since BPF just looks at the data. (It doesn't try to free the mbuf,
971 * tho' it will make a copy for tcpdump.)
972 */
973 if (bpf_gets_it) {
974 struct mbuf m0;
975 m0.m_len = sizeof eh;
976 m0.m_data = (caddr_t)&eh;
977 m0.m_next = m;
978
979 /* Pass it up */
980 bpf_mtap(sc->sc_arpcom.ac_if.if_bpf, &m0);
981 }
982 /*
983 * A signal passed up from the filtering code indicating that the
984 * packet is intended for BPF but not for the protocol machinery.
985 * We can save a few cycles by not handing it off to them.
986 */
987 if (bpf_gets_it == 2) {
988 last_not_for_us = m;
989 return;
990 }
991 #endif /* NBPFILTER > 0 */
992
993 /*
994 * In here there used to be code to check destination addresses upon
995 * receipt of a packet. We have deleted that code, and replaced it
996 * with code to check the address much earlier in the cycle, before
997 * copying the data in; this saves us valuable cycles when operating
998 * as a multicast router or when using BPF.
999 */
1000
1001 /*
1002 * Finally pass this packet up to higher layers.
1003 */
1004 ether_input(&sc->sc_arpcom.ac_if, &eh, m);
1005 }
1006
1007 static void
1008 ie_drop_packet_buffer(sc)
1009 struct ie_softc *sc;
1010 {
1011 int i;
1012
1013 do {
1014 /*
1015 * This means we are somehow out of sync. So, we reset the
1016 * adapter.
1017 */
1018 if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
1019 #ifdef IEDEBUG
1020 print_rbd(sc->rbuffs[sc->rbhead]);
1021 #endif
1022 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
1023 sc->sc_dev.dv_xname, sc->rbhead);
1024 iereset(sc);
1025 return;
1026 }
1027
1028 i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;
1029
1030 sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
1031 sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
1032 sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
1033 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
1034 sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
1035 } while (!i);
1036 }
1037
1038 /*
1039 * Start transmission on an interface.
1040 */
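/*
 * Packets are copied out of the if_snd queue into the on-board
 * transmit buffers (NTXBUF of them).  iexmit() is kicked only if no
 * transmit command is already outstanding; otherwise ietint() will
 * restart us when the current one completes.
 */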
1041 void
1042 iestart(ifp)
1043 struct ifnet *ifp;
1044 {
1045 struct ie_softc *sc = ifp->if_softc;
1046 struct mbuf *m0, *m;
1047 u_char *buffer;
1048 u_short len;
1049
1050 if ((ifp->if_flags & IFF_RUNNING) == 0)
1051 return;
1052
1053 if (sc->xmit_free == 0) {
1054 ifp->if_flags |= IFF_OACTIVE;
1055 if (!sc->xmit_busy)
1056 iexmit(sc);
1057 return;
1058 }
1059
1060 do {
1061 IF_DEQUEUE(&sc->sc_arpcom.ac_if.if_snd, m);
1062 if (!m)
1063 break;
1064
1065 len = 0;
1066 buffer = sc->xmit_cbuffs[sc->xchead];
1067
1068 for (m0 = m; m && (len + m->m_len) < IE_TBUF_SIZE; m = m->m_next) {
1069 (sc->sc_bcopy)(mtod(m, caddr_t), buffer, m->m_len);
1070 buffer += m->m_len;
1071 len += m->m_len;
1072 }
1073 if (m)
1074 printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);
1075
1076 m_freem(m0);
1077 len = max(len, ETHER_MIN_LEN);
1078 sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);
1079
1080 sc->xmit_free--;
1081 sc->xchead = (sc->xchead + 1) % NTXBUF;
1082 } while (sc->xmit_free > 0);
1083
1084 /* If we stuffed any packets into the card's memory, send now. */
1085 if ((sc->xmit_free < NTXBUF) && (!sc->xmit_busy))
1086 iexmit(sc);
1087
1088 return;
1089 }
1090
1091 /*
1092 * set up IE's ram space
1093 */
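/*
 * The SCP is pointed at the ISCP and the ISCP at the SCB, then the
 * chip is reset and given a channel attention.  A working 82586
 * clears iscp->ie_busy once it has found the structures; if the flag
 * is still set after a short delay, report failure.
 */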
1094 int
1095 ie_setupram(sc)
1096 struct ie_softc *sc;
1097 {
1098 volatile struct ie_sys_conf_ptr *scp;
1099 volatile struct ie_int_sys_conf_ptr *iscp;
1100 volatile struct ie_sys_ctl_block *scb;
1101 int s;
1102
1103 s = splnet();
1104
1105 scp = sc->scp;
1106 (sc->sc_bzero)((char *) scp, sizeof *scp);
1107
1108 iscp = sc->iscp;
1109 (sc->sc_bzero)((char *) iscp, sizeof *iscp);
1110
1111 scb = sc->scb;
1112 (sc->sc_bzero)((char *) scb, sizeof *scb);
1113
1114 scp->ie_bus_use = 0; /* 16-bit */
1115 ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));
1116
1117 iscp->ie_busy = 1; /* ie_busy == char */
1118 iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
1119 ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));
1120
1121 (sc->reset_586) (sc);
1122 (sc->chan_attn) (sc);
1123
1124 delay(100); /* wait a while... */
1125
1126 if (iscp->ie_busy) {
1127 splx(s);
1128 return 0;
1129 }
1130 /*
1131 * Acknowledge any interrupts we may have caused...
1132 */
1133 ie_ack(sc, IE_ST_WHENCE);
1134 splx(s);
1135
1136 return 1;
1137 }
1138
1139 void
1140 iereset(sc)
1141 struct ie_softc *sc;
1142 {
1143 int s = splnet();
1144
1145 printf("%s: reset\n", sc->sc_dev.dv_xname);
1146
1147 /* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
1148 sc->sc_arpcom.ac_if.if_flags &= ~(IFF_UP | IFF_OACTIVE);
1149 ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);
1150
1151 /*
1152 * Stop i82586 dead in its tracks.
1153 */
1154 if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
1155 printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);
1156
1157 if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
1158 printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);
1159
1160 #ifdef notdef
1161 if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
1162 panic("ie disappeared!\n");
1163 #endif
1164
1165 sc->sc_arpcom.ac_if.if_flags |= IFF_UP;
1166 ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);
1167
1168 splx(s);
1169 }
1170
1171 /*
1172 * This is called if we time out.
1173 */
1174 static void
1175 chan_attn_timeout(rock)
1176 caddr_t rock;
1177 {
1178 *(int *) rock = 1;
1179 }
1180
1181 /*
1182 * Send a command to the controller and wait for it to either
1183 * complete or be accepted, depending on the command. If the
1184 * command pointer is null, then pretend that the command is
1185 * not an action command. If the command pointer is not null,
1186 * and the command is an action command, wait for
1187 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
1188 * to become true.
1189 */
1190 static int
1191 command_and_wait(sc, cmd, pcmd, mask)
1192 struct ie_softc *sc;
1193 int cmd;
1194 volatile void *pcmd;
1195 int mask;
1196 {
1197 volatile struct ie_cmd_common *cc = pcmd;
1198 volatile struct ie_sys_ctl_block *scb = sc->scb;
1199 volatile int timedout = 0;
1200 extern int hz;
1201
1202 scb->ie_command = (u_short)cmd;
1203
1204 if (IE_ACTION_COMMAND(cmd) && pcmd) {
1205 (sc->chan_attn)(sc);
1206
1207 /*
1208 * XXX
1209 * I don't think this timeout works on suns.
1210 * we are at splnet() in the loop, and the timeout
1211 * stuff runs at software spl (so it is masked off?).
1212 */
1213
1214 /*
1215 * According to the packet driver, the minimum timeout should be
1216 * .369 seconds, which we round up to .4.
1217 */
1218
1219 timeout(chan_attn_timeout, (caddr_t)&timedout, 2 * hz / 5);
1220
1221 /*
1222 * Now spin-lock waiting for status. This is not a very nice
1223 * thing to do, but I haven't figured out how, or indeed if, we
1224 * can put the process waiting for action to sleep. (We may
1225 * be getting called through some other timeout running in the
1226 * kernel.)
1227 */
1228 for (;;)
1229 if ((cc->ie_cmd_status & mask) || timedout)
1230 break;
1231
1232 untimeout(chan_attn_timeout, (caddr_t)&timedout);
1233
1234 return timedout;
1235 } else {
1236 /*
1237 * Otherwise, just wait for the command to be accepted.
1238 */
1239 (sc->chan_attn)(sc);
1240
1241 while (scb->ie_command)
1242 ; /* spin lock */
1243
1244 return 0;
1245 }
1246 }
1247
1248 /*
1249 * Run the time-domain reflectometer...
1250 */
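/*
 * The TDR command makes the 82586 test the attached cable and report
 * transceiver problems, opens, or shorts, with distance expressed in
 * transmit-clock ticks.
 */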
1251 static void
1252 run_tdr(sc, cmd)
1253 struct ie_softc *sc;
1254 struct ie_tdr_cmd *cmd;
1255 {
1256 int result;
1257
1258 cmd->com.ie_cmd_status = SWAP(0);
1259 cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
1260 cmd->com.ie_cmd_link = SWAP(0xffff);
1261
1262 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1263 cmd->ie_tdr_time = SWAP(0);
1264
1265 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1266 !(cmd->com.ie_cmd_status & IE_STAT_OK))
1267 result = 0x10000; /* XXX */
1268 else
1269 result = cmd->ie_tdr_time;
1270
1271 ie_ack(sc, IE_ST_WHENCE);
1272
1273 if (result & IE_TDR_SUCCESS)
1274 return;
1275
1276 if (result & 0x10000) {
1277 printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
1278 } else if (result & IE_TDR_XCVR) {
1279 printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
1280 } else if (result & IE_TDR_OPEN) {
1281 printf("%s: TDR detected an open %d clocks away\n",
1282 sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
1283 } else if (result & IE_TDR_SHORT) {
1284 printf("%s: TDR detected a short %d clocks away\n",
1285 sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
1286 } else {
1287 printf("%s: TDR returned unknown status %x\n",
1288 sc->sc_dev.dv_xname, result);
1289 }
1290 }
1291
1292 /*
1293 * setup_bufs: set up the buffers
1294 *
1295 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers. the chip indexes its control data
1297 * structures with 16 bit offsets, and it indexes actual buffers with
1298 * 24 bit addresses. so we should allocate control buffers first so that
1299 * we don't overflow the 16 bit offset field. The number of transmit
1300 * buffers is fixed at compile time.
1301 *
1302 * note: this function was written to be easy to understand, rather than
1303 * highly efficient (it isn't in the critical path).
1304 */
1305 static void
1306 setup_bufs(sc)
1307 struct ie_softc *sc;
1308 {
1309 caddr_t ptr = sc->buf_area; /* memory pool */
1310 volatile struct ie_recv_frame_desc *rfd = (void *) ptr;
1311 volatile struct ie_recv_buf_desc *rbd;
1312 int n, r;
1313
1314 /*
1315 * step 0: zero memory and figure out how many recv buffers and
1316 * frames we can have. XXX CURRENTLY HARDWIRED AT MAX
1317 */
1318 (sc->sc_bzero)(ptr, sc->buf_area_sz);
1319 ptr = Align(ptr); /* set alignment and stick with it */
1320
1321 n = (int)Align(sizeof(struct ie_xmit_cmd)) +
1322 (int)Align(sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
1323 n *= NTXBUF; /* n = total size of xmit area */
1324
1325 n = sc->buf_area_sz - n;/* n = free space for recv stuff */
1326
1327 r = (int)Align(sizeof(struct ie_recv_frame_desc)) +
1328 (((int)Align(sizeof(struct ie_recv_buf_desc)) + IE_RBUF_SIZE) * B_PER_F);
1329
1330 /* r = size of one R frame */
1331
1332 sc->nframes = n / r;
1333 if (sc->nframes <= 0)
1334 panic("ie: bogus buffer calc\n");
1335 if (sc->nframes > MXFRAMES)
1336 sc->nframes = MXFRAMES;
1337
1338 sc->nrxbuf = sc->nframes * B_PER_F;
1339
1340 #ifdef IEDEBUG
1341 printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
1342 #endif
1343
1344 /*
1345 * step 1a: lay out and zero frame data structures for transmit and recv
1346 */
1347 for (n = 0; n < NTXBUF; n++) {
1348 sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
1349 ptr = Align(ptr + sizeof(struct ie_xmit_cmd));
1350 }
1351
1352 for (n = 0; n < sc->nframes; n++) {
1353 sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
1354 ptr = Align(ptr + sizeof(struct ie_recv_frame_desc));
1355 }
1356
1357 /*
1358 * step 1b: link together the recv frames and set EOL on last one
1359 */
1360 for (n = 0; n < sc->nframes; n++) {
1361 sc->rframes[n]->ie_fd_next =
1362 MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
1363 }
1364 sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;
1365
1366 /*
1367 * step 2a: lay out and zero frame buffer structures for xmit and recv
1368 */
1369 for (n = 0; n < NTXBUF; n++) {
1370 sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
1371 ptr = Align(ptr + sizeof(struct ie_xmit_buf));
1372 }
1373
1374 for (n = 0; n < sc->nrxbuf; n++) {
1375 sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
1376 ptr = Align(ptr + sizeof(struct ie_recv_buf_desc));
1377 }
1378
1379 /*
1380 * step 2b: link together recv bufs and set EOL on last one
1381 */
1382 for (n = 0; n < sc->nrxbuf; n++) {
1383 sc->rbuffs[n]->ie_rbd_next =
1384 MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
1385 }
1386 sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= IE_RBD_LAST;
1387
1388 /*
1389 * step 3: allocate the actual data buffers for xmit and recv
1390 * recv buffer gets linked into recv_buf_desc list here
1391 */
1392 for (n = 0; n < NTXBUF; n++) {
1393 sc->xmit_cbuffs[n] = (u_char *) ptr;
1394 ptr = Align(ptr + IE_TBUF_SIZE);
1395 }
1396
1397 /* Pointers to last packet sent and next available transmit buffer. */
1398 sc->xchead = sc->xctail = 0;
1399
1400 /* Clear transmit-busy flag and set number of free transmit buffers. */
1401 sc->xmit_busy = 0;
1402 sc->xmit_free = NTXBUF;
1403
1404 for (n = 0; n < sc->nrxbuf; n++) {
1405 sc->cbuffs[n] = (char *) ptr; /* XXX why char vs uchar? */
1406 sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
1407 ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
1408 ptr = Align(ptr + IE_RBUF_SIZE);
1409 }
1410
1411 /*
1412 * step 4: set the head and tail pointers on receive to keep track of
1413 * the order in which RFDs and RBDs are used. link in recv frames
1414 * and buffer into the scb.
1415 */
1416
1417 sc->rfhead = 0;
1418 sc->rftail = sc->nframes - 1;
1419 sc->rbhead = 0;
1420 sc->rbtail = sc->nrxbuf - 1;
1421
1422 sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
1423 sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);
1424
1425 #ifdef IEDEBUG
1426 printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
1427 #endif
1428 }
1429
1430 /*
1431 * Run the multicast setup command.
1432 * Called at splnet().
1433 */
1434 static int
1435 mc_setup(sc, ptr)
1436 struct ie_softc *sc;
1437 void *ptr;
1438 {
1439 volatile struct ie_mcast_cmd *cmd = ptr;
1440
1441 cmd->com.ie_cmd_status = SWAP(0);
1442 cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
1443 cmd->com.ie_cmd_link = SWAP(0xffff);
1444
1445 (sc->sc_bcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
1446 sc->mcast_count * sizeof *sc->mcast_addrs);
1447
1448 cmd->ie_mcast_bytes =
1449 SWAP(sc->mcast_count * ETHER_ADDR_LEN); /* grrr... */
1450
1451 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1452 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1453 !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1454 printf("%s: multicast address setup command failed\n",
1455 sc->sc_dev.dv_xname);
1456 return 0;
1457 }
1458 return 1;
1459 }
1460
1461 /*
1462 * This routine inits the ie.
1463 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
1464 * starting the receiver unit, and clearing interrupts.
1465 *
1466 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
1467 */
1468 int
1469 ieinit(sc)
1470 struct ie_softc *sc;
1471 {
1472 volatile struct ie_sys_ctl_block *scb = sc->scb;
1473 void *ptr;
1474 int n;
1475
1476 ptr = sc->buf_area;
1477
1478 /*
1479 * Send the configure command first.
1480 */
1481 {
1482 volatile struct ie_config_cmd *cmd = ptr;
1483
1484 scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1485 cmd->com.ie_cmd_status = SWAP(0);
1486 cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
1487 cmd->com.ie_cmd_link = SWAP(0xffff);
1488
1489 ie_setup_config(cmd, sc->promisc, 0);
1490
1491 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1492 !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1493 printf("%s: configure command failed\n",
1494 sc->sc_dev.dv_xname);
1495 return 0;
1496 }
1497 }
1498
1499 /*
1500 * Now send the Individual Address Setup command.
1501 */
1502 {
1503 volatile struct ie_iasetup_cmd *cmd = ptr;
1504
1505 scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1506 cmd->com.ie_cmd_status = SWAP(0);
1507 cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
1508 cmd->com.ie_cmd_link = SWAP(0xffff);
1509
1510 (sc->sc_bcopy)(sc->sc_arpcom.ac_enaddr,
1511 (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);
1512
1513 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1514 !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1515 printf("%s: individual address setup command failed\n",
1516 sc->sc_dev.dv_xname);
1517 return 0;
1518 }
1519 }
1520
1521 /*
1522 * Now run the time-domain reflectometer.
1523 */
1524 run_tdr(sc, ptr);
1525
1526 /*
1527 * Acknowledge any interrupts we have generated thus far.
1528 */
1529 ie_ack(sc, IE_ST_WHENCE);
1530
1531 /*
1532 * Set up the transmit and recv buffers.
1533 */
1534 setup_bufs(sc);
1535
1536 /* tell higher levels that we are here */
1537 sc->sc_arpcom.ac_if.if_flags |= IFF_RUNNING;
1538
1539 sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
1540 command_and_wait(sc, IE_RU_START, 0, 0);
1541
1542 ie_ack(sc, IE_ST_WHENCE);
1543
1544 if (sc->run_586)
1545 (sc->run_586)(sc);
1546
1547 return 0;
1548 }
1549
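/* iestop: stop the receiver; used when the interface is marked down. */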
1550 static void
1551 iestop(sc)
1552 struct ie_softc *sc;
1553 {
1554
1555 command_and_wait(sc, IE_RU_DISABLE, 0, 0);
1556 }
1557
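/*
 * Process an ioctl request: interface address assignment, up/down and
 * flag changes, and multicast list updates.
 */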
1558 int
1559 ieioctl(ifp, cmd, data)
1560 register struct ifnet *ifp;
1561 u_long cmd;
1562 caddr_t data;
1563 {
1564 struct ie_softc *sc = ifp->if_softc;
1565 struct ifaddr *ifa = (struct ifaddr *) data;
1566 struct ifreq *ifr = (struct ifreq *) data;
1567 int s, error = 0;
1568
1569 s = splnet();
1570
1571 switch (cmd) {
1572
1573 case SIOCSIFADDR:
1574 ifp->if_flags |= IFF_UP;
1575
1576 switch (ifa->ifa_addr->sa_family) {
1577 #ifdef INET
1578 case AF_INET:
1579 ieinit(sc);
1580 arp_ifinit(&sc->sc_arpcom, ifa);
1581 break;
1582 #endif
1583 #ifdef NS
1584 /* XXX - This code is probably wrong. */
1585 case AF_NS:
1586 {
1587 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1588
1589 if (ns_nullhost(*ina))
1590 ina->x_host =
1591 *(union ns_host *)(sc->sc_arpcom.ac_enaddr);
1592 else
1593 bcopy(ina->x_host.c_host,
1594 sc->sc_arpcom.ac_enaddr,
1595 sizeof(sc->sc_arpcom.ac_enaddr));
1596 /* Set new address. */
1597 ieinit(sc);
1598 break;
1599 }
1600 #endif /* NS */
1601 default:
1602 ieinit(sc);
1603 break;
1604 }
1605 break;
1606
1607 case SIOCSIFFLAGS:
1608 sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
1609
1610 if ((ifp->if_flags & IFF_UP) == 0 &&
1611 (ifp->if_flags & IFF_RUNNING) != 0) {
1612 /*
1613 * If interface is marked down and it is running, then
1614 * stop it.
1615 */
1616 iestop(sc);
1617 ifp->if_flags &= ~IFF_RUNNING;
1618 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1619 (ifp->if_flags & IFF_RUNNING) == 0) {
1620 /*
1621 * If interface is marked up and it is stopped, then
1622 * start it.
1623 */
1624 ieinit(sc);
1625 } else {
1626 /*
1627 * Reset the interface to pick up changes in any other
1628 * flags that affect hardware registers.
1629 */
1630 iestop(sc);
1631 ieinit(sc);
1632 }
1633 #ifdef IEDEBUG
1634 if (ifp->if_flags & IFF_DEBUG)
1635 sc->sc_debug = IED_ALL;
1636 else
1637 sc->sc_debug = 0;
1638 #endif
1639 break;
1640
1641 case SIOCADDMULTI:
1642 case SIOCDELMULTI:
1643 error = (cmd == SIOCADDMULTI) ?
1644 ether_addmulti(ifr, &sc->sc_arpcom) :
1645 ether_delmulti(ifr, &sc->sc_arpcom);
1646
1647 if (error == ENETRESET) {
1648 /*
1649 * Multicast list has changed; set the hardware filter
1650 * accordingly.
1651 */
1652 mc_reset(sc);
1653 error = 0;
1654 }
1655 break;
1656
1657 default:
1658 error = EINVAL;
1659 }
1660 splx(s);
1661 return error;
1662 }
1663
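/*
 * mc_reset: rebuild the driver's multicast address list from the
 * kernel's ether_multi list.  If the list overflows MAXMCAST or a
 * range of addresses is requested, fall back to IFF_ALLMULTI.  Either
 * way, set want_mcsetup so that ietint() issues an MC-SETUP command
 * the next time a transmit buffer frees up.
 */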
1664 static void
1665 mc_reset(sc)
1666 struct ie_softc *sc;
1667 {
1668 struct ether_multi *enm;
1669 struct ether_multistep step;
1670
1671 /*
1672 * Step through the list of addresses.
1673 */
1674 sc->mcast_count = 0;
1675 ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
1676 while (enm) {
1677 if (sc->mcast_count >= MAXMCAST ||
1678 bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
1679 sc->sc_arpcom.ac_if.if_flags |= IFF_ALLMULTI;
1680 ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, (void *)0);
1681 goto setflag;
1682 }
1683 bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
1684 sc->mcast_count++;
1685 ETHER_NEXT_MULTI(step, enm);
1686 }
1687 setflag:
1688 sc->want_mcsetup = 1;
1689 }
1690
1691 #ifdef IEDEBUG
1692 void
1693 print_rbd(rbd)
1694 volatile struct ie_recv_buf_desc *rbd;
1695 {
1696
1697 printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
1698 "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
1699 rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
1700 rbd->mbz);
1701 }
1702 #endif
1703