/*	$NetBSD: if_ie.c,v 1.16 1996/12/17 21:10:44 gwr Exp $ */

/*-
 * Copyright (c) 1993, 1994, 1995 Charles Hannum.
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1994, 1995, Rafal K. Boni
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum, by the
 *	University of Vermont and State Agricultural College and Garrett A.
 *	Wollman, by William F. Jolitz, and by the University of California,
 *	Berkeley, Lawrence Berkeley Laboratory, and its contributors.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Original StarLAN driver written by Garrett Wollman with reference to the
 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
 *
 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
 *
 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
 *
 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
 *
 * Converted to SUN ie driver by Charles D. Cranor,
 * October 1994, January 1995.
 * This sun version based on i386 version 1.30.
 */

/*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
 */

/*
Mode of operation:

We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
received frame descriptors around for the receiver to use, and
NRXBUF associated receive buffer descriptors, both in a circular
list.  Whenever a frame is received, we rotate both lists as
necessary.  (The 586 treats both lists as a simple queue.)  We also
keep a transmit command around so that packets can be sent off
quickly.

We configure the adapter in AL-LOC = 1 mode, which means that the
Ethernet/802.3 MAC header is placed at the beginning of the receive
buffer rather than being split off into various fields in the RFD.
This also means that we must include this header in the transmit
buffer as well.

By convention, all transmit commands, and only transmit commands,
shall have the I (IE_CMD_INTR) bit set in the command.  This way,
when an interrupt arrives at ieintr(), it is immediately possible
to tell what precisely caused it.  ANY OTHER command-sending
routines should run at splnet(), and should post an acknowledgement
to every interrupt they generate.
*/

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/netisr.h>
#include <net/route.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include <vm/vm.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/pmap.h>

/*
 * ugly byte-order hack for SUNs
 */

#define XSWAP(y)	( ((y) >> 8) | ((y) << 8) )
#define SWAP(x)		((u_short)(XSWAP((u_short)(x))))
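/*
 * Worked example: the 82586 keeps its 16-bit fields little-endian, while
 * the SUN CPUs this driver runs on are big-endian, so SWAP(0x1234)
 * yields 0x3412.  SWAP is its own inverse, so the same macro converts
 * in both directions.
 */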

#include "i82586.h"
#include "if_iereg.h"
#include "if_ievar.h"

static struct mbuf *last_not_for_us;

/*
 * IED: ie debug flags
 */

#define IED_RINT	0x01
#define IED_TINT	0x02
#define IED_RNR		0x04
#define IED_CNA		0x08
#define IED_READFRAME	0x10
#define IED_ALL		0x1f

#define ETHER_MIN_LEN	64
#define ETHER_MAX_LEN	1518
#define ETHER_ADDR_LEN	6

void iewatchdog __P((struct ifnet *));
int ieinit __P((struct ie_softc *));
int ieioctl __P((struct ifnet *, u_long, caddr_t));
void iestart __P((struct ifnet *));
void iereset __P((struct ie_softc *));
int ie_setupram __P((struct ie_softc *sc));

static void chan_attn_timeout __P((void *arg));
static int cmd_and_wait __P((struct ie_softc *, int, void *, int));

static void ie_drop_packet_buffer __P((struct ie_softc *));
static void ie_readframe __P((struct ie_softc *, int));
static void ie_setup_config __P((struct ie_config_cmd *, int, int));

static void ierint __P((struct ie_softc *));
static void iestop __P((struct ie_softc *));
static void ietint __P((struct ie_softc *));
static void iexmit __P((struct ie_softc *));

static int mc_setup __P((struct ie_softc *, void *));
static void mc_reset __P((struct ie_softc *));
static void run_tdr __P((struct ie_softc *, struct ie_tdr_cmd *));
static void setup_bufs __P((struct ie_softc *));

static inline caddr_t Align __P((caddr_t ptr));
static inline void ie_ack __P((struct ie_softc *, u_int));
static inline u_short ether_cmp __P((u_char *, u_char *));
static inline int check_eh __P((struct ie_softc *,
	struct ether_header *eh, int *));
static inline int ie_buflen __P((struct ie_softc *, int));
static inline int ie_packet_len __P((struct ie_softc *));
static inline int ieget __P((struct ie_softc *sc,
	struct mbuf **mp, struct ether_header *ehp, int *to_bpf));


#ifdef IEDEBUG
void print_rbd __P((volatile struct ie_recv_buf_desc *));
int in_ierint = 0;
int in_ietint = 0;
#endif


struct cfdriver ie_cd = {
	NULL, "ie", DV_IFNET
};


/*
 * address generation macros
 *  MK_24 = KVA -> 24 bit address in SUN byte order
 *  MK_16 = KVA -> 16 bit address in INTEL byte order
 *  ST_24 = store a 24 bit address in SUN byte order to INTEL byte order
 */
#define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
#define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
#define ST_24(to, from) { \
	u_long fval = (u_long)(from); \
	u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
	t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0];*/ \
	}
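/*
 * Worked example for ST_24: on the big-endian CPUs this driver runs on,
 * storing the 24-bit address 0x00abcdef writes t[0]=0xef, t[1]=0xcd,
 * t[2]=0xab, i.e. the low three bytes in the chip's little-endian order;
 * the high byte is deliberately not written (see the commented-out t[3]).
 */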

/*
 * Here are a few useful functions.  We could have done these as macros,
 * but since we have the inline facility, it makes sense to use that
 * instead.
 */
static inline void
ie_setup_config(cmd, promiscuous, manchester)
	struct ie_config_cmd *cmd;
	int promiscuous, manchester;
{

	/*
	 * these are all char's so no need to byte-swap
	 */
	cmd->ie_config_count = 0x0c;
	cmd->ie_fifo = 8;
	cmd->ie_save_bad = 0x40;
	cmd->ie_addr_len = 0x2e;
	cmd->ie_priority = 0;
	cmd->ie_ifs = 0x60;
	cmd->ie_slot_low = 0;
	cmd->ie_slot_high = 0xf2;
	cmd->ie_promisc = !!promiscuous | manchester << 2;
	cmd->ie_crs_cdt = 0;
	cmd->ie_min_len = 64;
	cmd->ie_junk = 0xff;
}

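/*
 * Align: round a pointer up to the next 4-byte boundary; for example,
 * 0x1005 rounds up to 0x1008, and 0x1008 is returned unchanged.
 */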
static inline caddr_t
Align(ptr)
	caddr_t ptr;
{
	u_long l = (u_long)ptr;

	l = (l + 3) & ~3L;
	return (caddr_t)l;
}

static inline void
ie_ack(sc, mask)
	struct ie_softc *sc;
	u_int mask;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;

	cmd_and_wait(sc, scb->ie_status & mask, 0, 0);
}


/*
 * Taken almost exactly from Bill's if_is.c,
 * then modified beyond recognition...
 */
void
ie_attach(sc)
	struct ie_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	int off;

	/* MD code has done its part before calling this. */
	printf(": hwaddr %s\n", ether_sprintf(sc->sc_addr));

	/* Allocate from end of buffer space for ISCP, SCB */
	off = sc->buf_area_sz;
	off &= ~3;

	/* Space for ISCP */
	off -= sizeof(*sc->iscp);
	sc->iscp = (volatile void *) (sc->buf_area + off);

	/* Space for SCB */
	off -= sizeof(*sc->scb);
	sc->scb = (volatile void *) (sc->buf_area + off);

	/* Remainder is for buffers, etc. */
	sc->buf_area_sz = off;

	/*
	 * Setup RAM for transmit/receive
	 */
	if (ie_setupram(sc) == 0) {
		printf(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	/*
	 * Initialize and attach S/W interface
	 */
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
}

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
void
iewatchdog(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++sc->sc_arpcom.ac_if.if_oerrors;

	iereset(sc);
}

/*
 * What to do upon receipt of an interrupt.
 */
int
ie_intr(v)
	void *v;
{
	struct ie_softc *sc = v;
	register u_short status;

	status = sc->scb->ie_status;

	/*
	 * check for parity error
	 */
	if (sc->hard_type == IE_VME) {
		volatile struct ievme *iev = (volatile struct ievme *)sc->sc_reg;
		if (iev->status & IEVME_PERR) {
			printf("%s: parity error (ctrl %x @ %02x%04x)\n",
			    sc->sc_dev.dv_xname, iev->pectrl,
			    iev->pectrl & IEVME_HADDR, iev->peaddr);
			iev->pectrl = iev->pectrl | IEVME_PARACK;
		}
	}

loop:
	/* Ack interrupts FIRST in case we receive more during the ISR. */
	ie_ack(sc, IE_ST_WHENCE & status);

	if (status & (IE_ST_RECV | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", sc->sc_dev.dv_xname);
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}

	if (status & IE_ST_DONE) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", sc->sc_dev.dv_xname);
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}

	if (status & IE_ST_RNR) {
		printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_ierrors++;
		iereset(sc);
	}

#ifdef IEDEBUG
	if ((status & IE_ST_ALLDONE) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", sc->sc_dev.dv_xname);
#endif

	if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
		goto loop;

	return 1;
}

/*
 * Process a received-frame interrupt.
 */
void
ierint(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;

	i = sc->rfhead;
	for (;;) {
		status = sc->rframes[i]->ie_fd_status;

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			sc->sc_arpcom.ac_if.if_ipackets++;
			if (!--timesthru) {
				sc->sc_arpcom.ac_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = 0;
				scb->ie_err_align = 0;
				scb->ie_err_resource = 0;
				scb->ie_err_overrun = 0;
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			if ((status & IE_FD_RNR) != 0 &&
			    (scb->ie_status & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_buf_desc =
				    MK_16(sc->sc_maddr, sc->rbuffs[0]);
				scb->ie_recv_list =
				    MK_16(sc->sc_maddr, sc->rframes[0]);
				cmd_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;
	}
}

/*
 * Process a command-complete interrupt.  These are only generated by
 * the transmission of frames.  This routine is deceptively simple, since
 * most of the real work is done by iestart().
 */
void
ietint(sc)
	struct ie_softc *sc;
{
	int status;

	sc->sc_arpcom.ac_if.if_timer = 0;
	sc->sc_arpcom.ac_if.if_flags &= ~IFF_OACTIVE;

	status = sc->xmit_cmds[sc->xctail]->ie_xmit_status;

	if (!(status & IE_STAT_COMPL) || (status & IE_STAT_BUSY))
		printf("ietint: command still busy!\n");

	if (status & IE_STAT_OK) {
		sc->sc_arpcom.ac_if.if_opackets++;
		sc->sc_arpcom.ac_if.if_collisions +=
		    SWAP(status & IE_XS_MAXCOLL);
	} else if (status & IE_STAT_ABORT) {
		printf("%s: send aborted\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_NOCARRIER) {
		printf("%s: no carrier\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_LOSTCTS) {
		printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_UNDERRUN) {
		printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_oerrors++;
	} else if (status & IE_XS_EXCMAX) {
		printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
		sc->sc_arpcom.ac_if.if_collisions += 16;
		sc->sc_arpcom.ac_if.if_oerrors++;
	}

	/*
	 * If multicast addresses were added or deleted while we
	 * were transmitting, mc_reset() set the want_mcsetup flag
	 * indicating that we should do it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
		sc->want_mcsetup = 0;
	}

	/* Done with the buffer. */
	sc->xmit_free++;
	sc->xmit_busy = 0;
	sc->xctail = (sc->xctail + 1) % NTXBUF;

	iestart(&sc->sc_arpcom.ac_if);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and
 * unrolled for speed.  I'd love to have an inline assembler
 * version of this...   XXX: Who wanted that? mycroft?
 * I wrote one, but the following is just as efficient.
 * This expands to 10 short m68k instructions! -gwr
 * Note: use this like bcmp()
 */
static inline u_short
ether_cmp(one, two)
	u_char *one, *two;
{
	register u_short *a = (u_short *) one;
	register u_short *b = (u_short *) two;
	register u_short diff;

	diff  = *a++ - *b++;
	diff |= *a++ - *b++;
	diff |= *a++ - *b++;

	return (diff);
}
#define ether_equal !ether_cmp

/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *   0 -> BPF doesn't get this packet
 *   1 -> BPF does get this packet
 *   2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
static inline int
check_eh(sc, eh, to_bpf)
	struct ie_softc *sc;
	struct ether_header *eh;
	int *to_bpf;
{
	int i;

	switch (sc->promisc) {
	case IFF_ALLMULTI:
		/*
		 * Receiving all multicasts, but no unicasts except those
		 * destined for us.
		 */
#if NBPFILTER > 0
		/* BPF gets this packet if anybody cares */
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		if (eh->ether_dhost[0] & 1)
			return 1;
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;
		return 0;

	case IFF_PROMISC:
		/*
		 * Receiving all packets.  These need to be passed on to BPF.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* If for us, accept and hand up to BPF */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;	/* we don't need to see it */
#endif

		/*
		 * Not a multicast, so BPF wants to see it but we don't.
		 */
		if (!(eh->ether_dhost[0] & 1))
			return 1;

		/*
		 * If it's one of our multicast groups, accept it and pass it
		 * up.
		 */
		for (i = 0; i < sc->mcast_count; i++) {
			if (ether_equal(eh->ether_dhost,
			    (u_char *)&sc->mcast_addrs[i])) {
#if NBPFILTER > 0
				if (*to_bpf)
					*to_bpf = 1;
#endif
				return 1;
			}
		}
		return 1;

	case IFF_ALLMULTI | IFF_PROMISC:
		/*
		 * Acting as a multicast router, and BPF running at the same
		 * time.  Whew!  (Hope this is a fast machine...)
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		/* We want to see multicasts. */
		if (eh->ether_dhost[0] & 1)
			return 1;

		/* We want to see our own packets */
		if (ether_equal(eh->ether_dhost, sc->sc_arpcom.ac_enaddr))
			return 1;

		/* Anything else goes to BPF but nothing else. */
#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;
#endif
		return 1;

	default:
		/*
		 * Only accept unicast packets destined for us, or multicasts
		 * for groups that we belong to.  For now, we assume that the
		 * '586 will only return packets that we asked it for.  This
		 * isn't strictly true (it uses hashing for the multicast filter),
		 * but it will do in this case, and we want to get out of here
		 * as quickly as possible.
		 */
#if NBPFILTER > 0
		*to_bpf = (sc->sc_arpcom.ac_if.if_bpf != 0);
#endif
		return 1;
	}
	return 0;
}

/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
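/*
 * For example, if IE_RBUF_SIZE were 512 (0x200), the mask below would be
 * 0x200 | 0x1ff = 0x3ff, which keeps any actual count from 0 up to and
 * including a full buffer while discarding the EOF/F status bits kept in
 * the top of the word.  (512 is only an illustration; the driver uses
 * whatever IE_RBUF_SIZE is defined to.)
 */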
static inline int
ie_buflen(sc, head)
	struct ie_softc *sc;
	int head;
{

	return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
	    & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
}

static inline int
ie_packet_len(sc)
	struct ie_softc *sc;
{
	int i;
	int head = sc->rbhead;
	int acc = 0;

	do {
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return -1;
		}

		i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;

		acc += ie_buflen(sc, head);
		head = (head + 1) % sc->nrxbuf;
	} while (!i);

	return acc;
}

/*
 * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
 * command to the chip to be executed.  On the way, if we have a BPF listener
 * also give him a copy.
 */
static void
iexmit(sc)
	struct ie_softc *sc;
{

#if NBPFILTER > 0
	/*
	 * If BPF is listening on this interface, let it see the packet before
	 * we push it on the wire.
	 */
	if (sc->sc_arpcom.ac_if.if_bpf)
		bpf_tap(sc->sc_arpcom.ac_if.if_bpf,
		    sc->xmit_cbuffs[sc->xctail],
		    SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags));
#endif

	sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= IE_XMIT_LAST;
	sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
	ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
	    MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));

	sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
	sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
	    IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST;

	sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
	sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
	    MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);

	sc->scb->ie_command_list =
	    MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
	cmd_and_wait(sc, IE_CU_START, 0, 0);

	sc->xmit_busy = 1;
	sc->sc_arpcom.ac_if.if_timer = 5;
}

/*
 * Read data off the interface, and turn it into an mbuf chain.
 *
 * This code is DRAMATICALLY different from the previous version; this
 * version tries to allocate the entire mbuf chain up front, given the
 * length of the data available.  This enables us to allocate mbuf
 * clusters in many situations where before we would have had a long
 * chain of partially-full mbufs.  This should help to speed up the
 * operation considerably.  (Provided that it works, of course.)
 */
static inline int
ieget(sc, mp, ehp, to_bpf)
	struct ie_softc *sc;
	struct mbuf **mp;
	struct ether_header *ehp;
	int *to_bpf;
{
	struct mbuf *m, *top, **mymp;
	int i;
	int offset;
	int totlen, resid;
	int thismboff;
	int head;

	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return -1;

	i = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->sc_bcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (!check_eh(sc, ehp, to_bpf)) {
		ie_drop_packet_buffer(sc);
		/* just this case, it's not an error */
		sc->sc_arpcom.ac_if.if_ierrors--;
		return -1;
	}
	totlen -= (offset = sizeof *ehp);

	MGETHDR(*mp, M_DONTWAIT, MT_DATA);
	if (!*mp) {
		ie_drop_packet_buffer(sc);
		return -1;
	}

	m = *mp;
	m->m_pkthdr.rcvif = &sc->sc_arpcom.ac_if;
	m->m_len = MHLEN;
	resid = m->m_pkthdr.len = totlen;
	top = 0;
	mymp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in.  It does not actually do the copying yet.
	 */
	do {	/* while (resid > 0) */
		/*
		 * Try to allocate an mbuf to hold the data that we have.  If
		 * we already allocated one, just get another one and stick it
		 * on the end (eventually).  If we don't already have one, try
		 * to allocate an mbuf cluster big enough to hold the whole
		 * packet, if we think it's reasonable, or a single mbuf which
		 * may or may not be big enough.  Got that?
		 */
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(top);
				ie_drop_packet_buffer(sc);
				return -1;
			}
			m->m_len = MLEN;
		}

		if (resid >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = min(resid, MCLBYTES);
		} else {
			if (resid < m->m_len) {
				if (!top && resid + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = resid;
			}
		}
		resid -= m->m_len;
		*mymp = m;
		mymp = &m->m_next;
	} while (resid > 0);

	resid = totlen;
	m = top;
	thismboff = 0;
	head = sc->rbhead;

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it.  There are no possible failures
	 * at or after this point.
	 */
	while (resid > 0) {	/* while there's stuff left */
		int thislen = ie_buflen(sc, head) - offset;

		/*
		 * If too much data for the current mbuf, then fill the current one
		 * up, go to the next one, and try again.
		 */
		if (thislen > m->m_len - thismboff) {
			int newlen = m->m_len - thismboff;
			(sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)newlen);
			m = m->m_next;
			thismboff = 0;		/* new mbuf, so no offset */
			offset += newlen;	/* we are now this far into
						 * the packet */
			resid -= newlen;	/* so there is this much left
						 * to get */
			continue;
		}

		/*
		 * If there is more than enough space in the mbuf to hold the
		 * contents of this buffer, copy everything in, advance pointers,
		 * and so on.
		 */
		if (thislen < m->m_len - thismboff) {
			(sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
			    mtod(m, caddr_t) + thismboff, (u_int)thislen);
			thismboff += thislen;	/* we are this far into the
						 * mbuf */
			resid -= thislen;	/* and this much is left */
			goto nextbuf;
		}

		/*
		 * Otherwise, there is exactly enough space to put this buffer's
		 * contents into the current mbuf.  Do the combination of the above
		 * actions.
		 */
		(sc->sc_bcopy)((caddr_t)(sc->cbuffs[head] + offset),
		    mtod(m, caddr_t) + thismboff, (u_int)thislen);
		m = m->m_next;
		thismboff = 0;		/* new mbuf, start at the beginning */
		resid -= thislen;	/* and we are this far through */

		/*
		 * Advance all the pointers.  We can get here from either of the
		 * last two cases, but never the first.
		 */
nextbuf:
		offset = 0;
		sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
		sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbhead = head = (head + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	}

	/*
	 * Unless something changed strangely while we were doing the copy,
	 * we have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return 0;
}

/*
 * Read frame NUM from unit UNIT (pre-cached as IE).
 *
 * This routine reads the RFD at NUM, and copies in the buffers from
 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
 * doesn't start complaining.  Trailers are DROPPED---there's no point
 * in wasting time on confusing code to deal with them.  Hopefully,
 * this machine will never ARP for trailers anyway.
 */
static void
ie_readframe(sc, num)
	struct ie_softc *sc;
	int num;		/* frame number to read */
{
	int status;
	struct mbuf *m = 0;
	struct ether_header eh;
#if NBPFILTER > 0
	int bpf_gets_it = 0;
#endif

	status = sc->rframes[num]->ie_fd_status;

	/* Immediately advance the RFD list, since we have copied ours now. */
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
	sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	if (status & IE_FD_OK) {
#if NBPFILTER > 0
		if (ieget(sc, &m, &eh, &bpf_gets_it)) {
#else
		if (ieget(sc, &m, &eh, 0)) {
#endif
			sc->sc_arpcom.ac_if.if_ierrors++;
			return;
		}
	}

#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME)
		printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
#endif

	if (!m)
		return;

	if (last_not_for_us) {
		m_freem(last_not_for_us);
		last_not_for_us = 0;
	}

#if NBPFILTER > 0
	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because
	 * bpf_mtap expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data.  (It doesn't try to free the mbuf,
	 * tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		struct mbuf m0;
		m0.m_len = sizeof eh;
		m0.m_data = (caddr_t)&eh;
		m0.m_next = m;

		/* Pass it up */
		bpf_mtap(sc->sc_arpcom.ac_if.if_bpf, &m0);
	}
	/*
	 * A signal passed up from the filtering code indicating that the
	 * packet is intended for BPF but not for the protocol machinery.
	 * We can save a few cycles by not handing it off to them.
	 */
	if (bpf_gets_it == 2) {
		last_not_for_us = m;
		return;
	}
#endif /* NBPFILTER > 0 */

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet.  We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	ether_input(&sc->sc_arpcom.ac_if, &eh, m);
}

static void
ie_drop_packet_buffer(sc)
	struct ie_softc *sc;
{
	int i;

	do {
		/*
		 * This means we are somehow out of sync.  So, we reset the
		 * adapter.
		 */
		if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return;
		}

		i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;

		sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
		sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
		sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	} while (!i);
}

/*
 * Start transmission on an interface.
 */
void
iestart(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	u_char *buffer;
	u_short len;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (sc->xmit_free == 0) {
		ifp->if_flags |= IFF_OACTIVE;
		if (!sc->xmit_busy)
			iexmit(sc);
		return;
	}

	do {
		IF_DEQUEUE(&sc->sc_arpcom.ac_if.if_snd, m);
		if (!m)
			break;

		len = 0;
		buffer = sc->xmit_cbuffs[sc->xchead];

		for (m0 = m; m && (len + m->m_len) < IE_TBUF_SIZE; m = m->m_next) {
			(sc->sc_bcopy)(mtod(m, caddr_t), buffer, m->m_len);
			buffer += m->m_len;
			len += m->m_len;
		}
		if (m)
			printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);

		m_freem(m0);
		len = max(len, ETHER_MIN_LEN);
		sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);

		sc->xmit_free--;
		sc->xchead = (sc->xchead + 1) % NTXBUF;
	} while (sc->xmit_free > 0);

	/* If we stuffed any packets into the card's memory, send now. */
	if ((sc->xmit_free < NTXBUF) && (!sc->xmit_busy))
		iexmit(sc);

	return;
}

/*
 * set up IE's ram space
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splnet();

	scp = sc->scp;
	(sc->sc_bzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->sc_bzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->sc_bzero)((char *) scb, sizeof *scb);

	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	(sc->reset_586)(sc);
	(sc->chan_attn)(sc);

	delay(100);		/* wait a while... */

	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}

void
iereset(sc)
	struct ie_softc *sc;
{
	int s = splnet();

	printf("%s: reset\n", sc->sc_dev.dv_xname);

	/* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
	sc->sc_arpcom.ac_if.if_flags &= ~(IFF_UP | IFF_OACTIVE);
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (cmd_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);

	if (cmd_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);

#ifdef notdef
	if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
		panic("ie disappeared!\n");
#endif

	sc->sc_arpcom.ac_if.if_flags |= IFF_UP;
	ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, 0);

	splx(s);
}

/*
 * This is called if we time out.
 */
static void
chan_attn_timeout(arg)
	void *arg;
{
	*((int *)arg) = 1;
}

/*
 * Send a command to the controller and wait for it to either
 * complete or be accepted, depending on the command.  If the
 * command pointer is null, then pretend that the command is
 * not an action command.  If the command pointer is not null,
 * and the command is an action command, wait for
 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
 * to become true.
 */
static int
cmd_and_wait(sc, cmd, pcmd, mask)
	struct ie_softc *sc;
	int cmd;
	void *pcmd;
	int mask;
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	volatile int timedout = 0;
	extern int hz;

	scb->ie_command = (u_short)cmd;

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		(sc->chan_attn)(sc);

		/*
		 * XXX
		 * I don't think this timeout works on suns.
		 * we are at splnet() in the loop, and the timeout
		 * stuff runs at software spl (so it is masked off?).
		 */

		/*
		 * According to the packet driver, the minimum timeout should be
		 * .369 seconds, which we round up to .4.
		 */

		timeout(chan_attn_timeout, (void *)&timedout, 2 * hz / 5);

		/*
		 * Now spin-lock waiting for status.  This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if, we
		 * can put the process waiting for action to sleep.  (We may
		 * be getting called through some other timeout running in the
		 * kernel.)
		 */
		for (;;)
			if ((cc->ie_cmd_status & mask) || timedout)
				break;

		untimeout(chan_attn_timeout, (void *)&timedout);

		return timedout;
	} else {
		/*
		 * Otherwise, just wait for the command to be accepted.
		 */
		(sc->chan_attn)(sc);

		while (scb->ie_command)
			;		/* spin lock */

		return 0;
	}
}

/*
 * Run the time-domain reflectometer...
 */
static void
run_tdr(sc, cmd)
	struct ie_softc *sc;
	struct ie_tdr_cmd *cmd;
{
	int result;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK))
		result = 0x10000;	/* XXX */
	else
		result = cmd->ie_tdr_time;

	ie_ack(sc, IE_ST_WHENCE);

	if (result & IE_TDR_SUCCESS)
		return;

	if (result & 0x10000) {
		printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_XCVR) {
		printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
	} else if (result & IE_TDR_OPEN) {
		printf("%s: TDR detected an open %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else if (result & IE_TDR_SHORT) {
		printf("%s: TDR detected a short %d clocks away\n",
		    sc->sc_dev.dv_xname, SWAP(result & IE_TDR_TIME));
	} else {
		printf("%s: TDR returned unknown status %x\n",
		    sc->sc_dev.dv_xname, result);
	}
}

/*
 * setup_bufs: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers.  the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses.  so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field.  The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 */
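/*
 * Rough sketch of the resulting layout of sc->buf_area, low to high
 * addresses (the ISCP and SCB were already carved off the top in
 * ie_attach):
 *
 *   xmit commands | recv frame descriptors | xmit buffer descriptors |
 *   recv buffer descriptors | xmit data buffers | recv data buffers
 */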
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	u_int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have.  XXX CURRENTLY HARDWIRED AT MAX
	 */
	(sc->sc_bzero)(ptr, sc->buf_area_sz);
	ptr = Align(ptr);		/* set alignment and stick with it */

	n = ALIGN(sizeof(struct ie_xmit_cmd)) +
	    ALIGN(sizeof(struct ie_xmit_buf)) +
	    IE_TBUF_SIZE;
	n *= NTXBUF;			/* n = total size of xmit area */

	n = sc->buf_area_sz - n;	/* n = free space for recv stuff */

	r = ALIGN(sizeof(struct ie_recv_frame_desc)) +
	    ((ALIGN(sizeof(struct ie_recv_buf_desc)) +
	    IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc\n");
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = Align(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = Align(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= IE_RBD_LAST;

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = Align(ptr + IE_TBUF_SIZE);
	}

	/* Pointers to last packet sent and next available transmit buffer. */
	sc->xchead = sc->xctail = 0;

	/* Clear transmit-busy flag and set number of free transmit buffers. */
	sc->xmit_busy = 0;
	sc->xmit_free = NTXBUF;

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
		ptr = Align(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used.  link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}

/*
 * Run the multicast setup command.
 * Called at splnet().
 */
static int
mc_setup(sc, ptr)
	struct ie_softc *sc;
	void *ptr;
{
	struct ie_mcast_cmd *cmd = ptr;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
	cmd->com.ie_cmd_link = SWAP(0xffff);

	(sc->sc_bcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
		printf("%s: multicast address setup command failed\n",
		    sc->sc_dev.dv_xname);
		return 0;
	}
	return 1;
}

/*
 * This routine inits the ie.
 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
 * starting the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	void *ptr;

	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		struct ie_config_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		ie_setup_config(cmd, sc->promisc, 0);

		if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		struct ie_iasetup_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->sc_bcopy)(sc->sc_arpcom.ac_enaddr,
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		if (cmd_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	/* tell higher levels that we are here */
	sc->sc_arpcom.ac_if.if_flags |= IFF_RUNNING;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	cmd_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	if (sc->run_586)
		(sc->run_586)(sc);

	return 0;
}

static void
iestop(sc)
	struct ie_softc *sc;
{

	cmd_and_wait(sc, IE_RU_DISABLE, 0, 0);
}

int
ieioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)(sc->sc_arpcom.ac_enaddr);
			else
				bcopy(ina->x_host.c_host,
				    sc->sc_arpcom.ac_enaddr,
				    sizeof(sc->sc_arpcom.ac_enaddr));
			/* Set new address. */
			ieinit(sc);
			break;
		    }
#endif /* NS */
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);

		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return error;
}

static void
mc_reset(sc)
	struct ie_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Step through the list of addresses.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
	while (enm) {
		if (sc->mcast_count >= MAXMCAST ||
		    bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			sc->sc_arpcom.ac_if.if_flags |= IFF_ALLMULTI;
			ieioctl(&sc->sc_arpcom.ac_if, SIOCSIFFLAGS, (void *)0);
			goto setflag;
		}
		bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
setflag:
	sc->want_mcsetup = 1;
}

#ifdef IEDEBUG
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd, rbd->ie_rbd_actual,
	    rbd->ie_rbd_next, rbd->ie_rbd_buffer, rbd->ie_rbd_length,
	    rbd->mbz);
}
#endif