1 /* $NetBSD: i82586.c,v 1.3 1997/07/28 22:26:13 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1997 Paul Kranenburg.
5 * Copyright (c) 1993, 1994, 1995 Charles Hannum.
6 * Copyright (c) 1992, 1993, University of Vermont and State
7 * Agricultural College.
8 * Copyright (c) 1992, 1993, Garrett A. Wollman.
9 *
10 * Portions:
11 * Copyright (c) 1994, 1995, Rafal K. Boni
12 * Copyright (c) 1990, 1991, William F. Jolitz
13 * Copyright (c) 1990, The Regents of the University of California
14 *
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by Charles Hannum, by the
28 * University of Vermont and State Agricultural College and Garrett A.
29 * Wollman, by William F. Jolitz, and by the University of California,
30 * Berkeley, Lawrence Berkeley Laboratory, and its contributors.
31 * 4. Neither the names of the Universities nor the names of the authors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 */
47
48 /*
49 * Intel 82586 Ethernet chip
50 * Register, bit, and structure definitions.
51 *
52 * Original StarLAN driver written by Garrett Wollman with reference to the
53 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
54 *
55 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
56 *
57 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
58 *
59 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
60 *
61 * Converted to SUN ie driver by Charles D. Cranor,
62 * October 1994, January 1995.
63 * This sun version is based on i386 version 1.30.
64 */
65
66 /*
67 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
68 * sun-4/200's, and VME-based suns. The byte order is all wrong for a
69 * SUN, making life difficult. Programming this chip is mostly the same,
70 * but certain details differ from system to system. This driver is
71 * written so that different "ie" interfaces can be controlled by the same
72 * driver.
73 */
74
75 /*
76 Mode of operation:
77
78 We run the 82586 in a standard Ethernet mode. We keep NFRAMES
79 received frame descriptors around for the receiver to use, and
80 NRXBUF associated receive buffer descriptors, both in a circular
81 list. Whenever a frame is received, we rotate both lists as
82 necessary. (The 586 treats both lists as a simple queue.) We also
83 keep a transmit command around so that packets can be sent off
84 quickly.
85
86 We configure the adapter in AL-LOC = 1 mode, which means that the
87 Ethernet/802.3 MAC header is placed at the beginning of the receive
88 buffer rather than being split off into various fields in the RFD.
89 This also means that we must include this header in the transmit
90 buffer as well.
91
92 By convention, all transmit commands, and only transmit commands,
93 shall have the I (IE_CMD_INTR) bit set in the command. This way,
94 when an interrupt arrives at ieintr(), it is immediately possible
95 to tell what precisely caused it. ANY OTHER command-sending
96 routines should run at splnet(), and should post an acknowledgement
97 to every interrupt they generate.
98
99 */
100
101 #include "bpfilter.h"
102
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/mbuf.h>
106 #include <sys/buf.h>
107 #include <sys/protosw.h>
108 #include <sys/socket.h>
109 #include <sys/ioctl.h>
110 #include <sys/errno.h>
111 #include <sys/syslog.h>
112 #include <sys/device.h>
113
114 #include <net/if.h>
115 #include <net/if_types.h>
116 #include <net/if_dl.h>
117 #include <net/if_ether.h>
118
119 #if NBPFILTER > 0
120 #include <net/bpf.h>
121 #include <net/bpfdesc.h>
122 #endif
123
124 #ifdef INET
125 #include <netinet/in.h>
126 #include <netinet/in_systm.h>
127 #include <netinet/in_var.h>
128 #include <netinet/ip.h>
129 #include <netinet/if_inarp.h>
130 #endif
131
132 #ifdef NS
133 #include <netns/ns.h>
134 #include <netns/ns_if.h>
135 #endif
136
137 #include <dev/ic/i82586reg.h>
138 #include <dev/ic/i82586var.h>
139
140 void iewatchdog __P((struct ifnet *));
141 int ieinit __P((struct ie_softc *));
142 int ieioctl __P((struct ifnet *, u_long, caddr_t));
143 void iestart __P((struct ifnet *));
144 void iereset __P((struct ie_softc *));
145 static void ie_readframe __P((struct ie_softc *, int));
146 static void ie_drop_packet_buffer __P((struct ie_softc *));
147 int ie_setupram __P((struct ie_softc *));
148 static int command_and_wait __P((struct ie_softc *, int,
149 void volatile *, int));
150 /*static*/ void ierint __P((struct ie_softc *));
151 /*static*/ void ietint __P((struct ie_softc *));
152 static struct mbuf *ieget __P((struct ie_softc *,
153 struct ether_header *, int *));
154 static void setup_bufs __P((struct ie_softc *));
155 static int mc_setup __P((struct ie_softc *, void *));
156 static void mc_reset __P((struct ie_softc *));
157 static __inline int ether_equal __P((u_char *, u_char *));
158 static __inline void ie_ack __P((struct ie_softc *, u_int));
159 static __inline void ie_setup_config __P((volatile struct ie_config_cmd *,
160 int, int));
161 static __inline int check_eh __P((struct ie_softc *, struct ether_header *,
162 int *));
163 static __inline int ie_buflen __P((struct ie_softc *, int));
164 static __inline int ie_packet_len __P((struct ie_softc *));
165 static __inline void iexmit __P((struct ie_softc *));
166
167 static void run_tdr __P((struct ie_softc *, struct ie_tdr_cmd *));
168 static void iestop __P((struct ie_softc *));
169
170 #ifdef IEDEBUG
171 void print_rbd __P((volatile struct ie_recv_buf_desc *));
172
173 int in_ierint = 0;
174 int in_ietint = 0;
175 #endif
176
177 struct cfdriver ie_cd = {
178 NULL, "ie", DV_IFNET
179 };
180
181 /*
182 * Address generation macros:
183 * MK_24 = KVA -> 24 bit address in native byte order
184 * MK_16 = KVA -> 16 bit address in INTEL byte order
185 * ST_24 = store a 24 bit address, given in native byte order, in INTEL byte order
186 */
187 #define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
188
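/*
 * SWAP converts a 16-bit value between host byte order and the
 * little-endian byte order the i82586 expects; on little-endian
 * hosts it is a no-op.
 */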
189 #if BYTE_ORDER == BIG_ENDIAN
190 #define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )
191 #define SWAP(x) ({u_short _z=(x); (u_short)XSWAP(_z);})
192
193 #define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
194 #define ST_24(to, from) { \
195 u_long fval = (u_long)(from); \
196 u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
197 t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0] ;*/ \
198 }
199 #else
200 #define SWAP(x) x
201 #define MK_16(base, ptr) ((u_short)(u_long)MK_24(base, ptr))
202 #define ST_24(to, from) {to = (from);}
203 #endif
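/*
 * Example (hypothetical addresses, big-endian host): with shared memory
 * based at KVA 0xfe000000 and a command block at KVA 0xfe000100,
 * MK_24() yields the 24-bit offset 0x000100, and MK_16() yields that
 * offset with its two bytes swapped (0x0001), which the little-endian
 * chip reads back from shared memory as 0x0100.
 */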
204
205 /*
206 * Here are a few useful functions. We could have done these as macros, but
207 * since we have the inline facility, it makes sense to use that instead.
208 */
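/*
 * ie_setup_config: fill in an 82586 CONFIGURE command block. All
 * parameters are fixed at the driver's standard values except the
 * promiscuous and Manchester-encoding bits.
 */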
209 static __inline void
210 ie_setup_config(cmd, promiscuous, manchester)
211 volatile struct ie_config_cmd *cmd;
212 int promiscuous, manchester;
213 {
214
215 cmd->ie_config_count = 0x0c;
216 cmd->ie_fifo = 8;
217 cmd->ie_save_bad = 0x40;
218 cmd->ie_addr_len = 0x2e;
219 cmd->ie_priority = 0;
220 cmd->ie_ifs = 0x60;
221 cmd->ie_slot_low = 0;
222 cmd->ie_slot_high = 0xf2;
223 cmd->ie_promisc = !!promiscuous | manchester << 2;
224 cmd->ie_crs_cdt = 0;
225 cmd->ie_min_len = 64;
226 cmd->ie_junk = 0xff;
227 }
228
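/*
 * ie_ack: acknowledge the interrupt status bits selected by `mask'
 * (native byte order) by writing them back through the SCB command word.
 */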
229 static __inline void
230 ie_ack(sc, mask)
231 struct ie_softc *sc;
232 u_int mask; /* in native byte-order */
233 {
234 volatile struct ie_sys_ctl_block *scb = sc->scb;
235
236 command_and_wait(sc, SWAP(scb->ie_status) & mask, 0, 0);
237 }
238
239
240 /*
241 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
242 */
243 void
244 ie_attach(sc, name, etheraddr)
245 struct ie_softc *sc;
246 char *name;
247 u_int8_t *etheraddr;
248 {
249 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
250
251 if (ie_setupram(sc) == 0) { /* XXX - ISA version? */
252 printf(": RAM CONFIG FAILED!\n");
253 /* XXX should reclaim resources? */
254 return;
255 }
256
257 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
258 ifp->if_softc = sc;
259 ifp->if_start = iestart;
260 ifp->if_ioctl = ieioctl;
261 ifp->if_watchdog = iewatchdog;
262 ifp->if_flags =
263 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
264
265 /* Attach the interface. */
266 if_attach(ifp);
267 ether_ifattach(ifp, etheraddr);
268
269 printf(" address %s, type %s\n", ether_sprintf(etheraddr), name);
270
271 #if NBPFILTER > 0
272 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
273 #endif
274 }
275
276
277 /*
278 * Device timeout/watchdog routine. Entered if the device neglects to generate
279 * an interrupt after a transmit has been started on it.
280 */
281 void
282 iewatchdog(ifp)
283 struct ifnet *ifp;
284 {
285 struct ie_softc *sc = ifp->if_softc;
286
287 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
288 ++ifp->if_oerrors;
289
290 iereset(sc);
291 }
292
293 /*
294 * What to do upon receipt of an interrupt.
295 */
296 int
297 ieintr(v)
298 void *v;
299 {
300 struct ie_softc *sc = v;
301 register u_short status;
302
303 status = SWAP(sc->scb->ie_status);
304
305 /*
306 * Implementation dependent interrupt handling.
307 */
308 if (sc->intrhook)
309 (*sc->intrhook)(sc);
310
311 loop:
312 /* Ack interrupts FIRST in case we receive more during the ISR. */
313 ie_ack(sc, IE_ST_WHENCE & status);
314
315 if (status & (IE_ST_FR | IE_ST_RNR)) {
316 #ifdef IEDEBUG
317 in_ierint++;
318 if (sc->sc_debug & IED_RINT)
319 printf("%s: rint\n", sc->sc_dev.dv_xname);
320 #endif
321 ierint(sc);
322 #ifdef IEDEBUG
323 in_ierint--;
324 #endif
325 }
326
327 if (status & IE_ST_CX) {
328 #ifdef IEDEBUG
329 in_ietint++;
330 if (sc->sc_debug & IED_TINT)
331 printf("%s: tint\n", sc->sc_dev.dv_xname);
332 #endif
333 ietint(sc);
334 #ifdef IEDEBUG
335 in_ietint--;
336 #endif
337 }
338
339 if (status & IE_ST_RNR) {
340 printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
341 sc->sc_ethercom.ec_if.if_ierrors++;
342 iereset(sc);
343 return (1);
344 }
345
346 #ifdef IEDEBUG
347 if ((status & IE_ST_CNA) && (sc->sc_debug & IED_CNA))
348 printf("%s: cna\n", sc->sc_dev.dv_xname);
349 #endif
350
351 status = SWAP(sc->scb->ie_status);
352 if (status & IE_ST_WHENCE)
353 goto loop;
354
355 return (1);
356 }
357
358 /*
359 * Process a received-frame interrupt.
360 */
361 void
362 ierint(sc)
363 struct ie_softc *sc;
364 {
365 volatile struct ie_sys_ctl_block *scb = sc->scb;
366 int i, status;
367 static int timesthru = 1024;
368
369 i = sc->rfhead;
370 for (;;) {
371 status = SWAP(sc->rframes[i]->ie_fd_status);
372
373 if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
374 if (--timesthru == 0) {
375 sc->sc_ethercom.ec_if.if_ierrors +=
376 SWAP(scb->ie_err_crc) +
377 SWAP(scb->ie_err_align) +
378 SWAP(scb->ie_err_resource) +
379 SWAP(scb->ie_err_overrun);
380 scb->ie_err_crc = scb->ie_err_align =
381 scb->ie_err_resource = scb->ie_err_overrun =
382 SWAP(0);
383 timesthru = 1024;
384 }
385 ie_readframe(sc, i);
386 } else {
387 if ((status & IE_FD_RNR) != 0 &&
388 (SWAP(scb->ie_status) & IE_RU_READY) == 0) {
389 sc->rframes[0]->ie_fd_buf_desc =
390 MK_16(sc->sc_maddr, sc->rbuffs[0]);
391 scb->ie_recv_list =
392 MK_16(sc->sc_maddr, sc->rframes[0]);
393 command_and_wait(sc, IE_RU_START, 0, 0);
394 }
395 break;
396 }
397 i = (i + 1) % sc->nframes;
398 }
399 }
400
401 /*
402 * Process a command-complete interrupt. These are only generated by the
403 * transmission of frames. This routine is deceptively simple, since most of
404 * the real work is done by iestart().
405 */
406 void
407 ietint(sc)
408 struct ie_softc *sc;
409 {
410 int status;
411
412 sc->sc_ethercom.ec_if.if_timer = 0;
413 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
414
415 status = SWAP(sc->xmit_cmds[sc->xctail]->ie_xmit_status);
416
417 if ((status & IE_STAT_COMPL) == 0 || (status & IE_STAT_BUSY))
418 printf("ietint: command still busy!\n");
419
420 if (status & IE_STAT_OK) {
421 sc->sc_ethercom.ec_if.if_opackets++;
422 sc->sc_ethercom.ec_if.if_collisions += (status & IE_XS_MAXCOLL);
423 } else if (status & IE_STAT_ABORT) {
424 printf("%s: send aborted\n", sc->sc_dev.dv_xname);
425 sc->sc_ethercom.ec_if.if_oerrors++;
426 } else if (status & IE_XS_NOCARRIER) {
427 printf("%s: no carrier\n", sc->sc_dev.dv_xname);
428 sc->sc_ethercom.ec_if.if_oerrors++;
429 } else if (status & IE_XS_LOSTCTS) {
430 printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
431 sc->sc_ethercom.ec_if.if_oerrors++;
432 } else if (status & IE_XS_UNDERRUN) {
433 printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
434 sc->sc_ethercom.ec_if.if_oerrors++;
435 } else if (status & IE_XS_EXCMAX) {
436 printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
437 sc->sc_ethercom.ec_if.if_collisions += 16;
438 sc->sc_ethercom.ec_if.if_oerrors++;
439 }
440
441 /*
442 * If multicast addresses were added or deleted while transmitting,
443 * mc_reset() set the want_mcsetup flag indicating that we should run
444 * the multicast setup command now, in the buffer that just came free.
445 */
446 if (sc->want_mcsetup) {
447 mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
448 sc->want_mcsetup = 0;
449 }
450
451 /* Done with the buffer. */
452 sc->xmit_busy--;
453 sc->xctail = (sc->xctail + 1) % NTXBUF;
454
455 /* Start the next packet, if any, transmitting. */
456 if (sc->xmit_busy > 0)
457 iexmit(sc);
458
459 iestart(&sc->sc_ethercom.ec_if);
460 }
461
462 /*
463 * Compare two Ether/802 addresses for equality, inlined and unrolled for
464 * speed.
465 */
466 static __inline int
467 ether_equal(one, two)
468 u_char *one, *two;
469 {
470
471 if (one[5] != two[5] || one[4] != two[4] || one[3] != two[3] ||
472 one[2] != two[2] || one[1] != two[1] || one[0] != two[0])
473 return 0;
474 return 1;
475 }
476
477 /*
478 * Check for a valid address. to_bpf is filled in with one of the following:
479 * 0 -> BPF doesn't get this packet
480 * 1 -> BPF does get this packet
481 * 2 -> BPF does get this packet, but we don't
482 * Return value is true if the packet is for us, and false otherwise.
483 *
484 * This routine is a mess, but it's also critical that it be as fast
485 * as possible. It could be made cleaner if we can assume that the
486 * only client which will fiddle with IFF_PROMISC is BPF. This is
487 * probably a good assumption, but we do not make it here. (Yet.)
488 */
489 static __inline int
490 check_eh(sc, eh, to_bpf)
491 struct ie_softc *sc;
492 struct ether_header *eh;
493 int *to_bpf;
494 {
495 struct ifnet *ifp;
496 int i;
497
498 ifp = &sc->sc_ethercom.ec_if;
499
500 switch(sc->promisc) {
501 case IFF_ALLMULTI:
502 /*
503 * Receiving all multicasts, but no unicasts except those
504 * destined for us.
505 */
506 #if NBPFILTER > 0
507 /* BPF gets this packet if anybody cares */
508 *to_bpf = (ifp->if_bpf != 0);
509 #endif
510 if (eh->ether_dhost[0] & 1)
511 return 1;
512 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
513 return 1;
514 return 0;
515
516 case IFF_PROMISC:
517 /*
518 * Receiving all packets. These need to be passed on to BPF.
519 */
520 #if NBPFILTER > 0
521 *to_bpf = (ifp->if_bpf != 0);
522 #endif
523 /* If for us, accept and hand up to BPF */
524 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
525 return 1;
526
527 #if NBPFILTER > 0
528 if (*to_bpf)
529 *to_bpf = 2; /* we don't need to see it */
530 #endif
531
532 /*
533 * Not a multicast, so BPF wants to see it but we don't.
534 */
535 if ((eh->ether_dhost[0] & 1) == 0)
536 return 1;
537
538 /*
539 * If it's one of our multicast groups, accept it and pass it
540 * up.
541 */
542 for (i = 0; i < sc->mcast_count; i++) {
543 if (ether_equal(eh->ether_dhost,
544 (u_char *)&sc->mcast_addrs[i])) {
545 #if NBPFILTER > 0
546 if (*to_bpf)
547 *to_bpf = 1;
548 #endif
549 return 1;
550 }
551 }
552 return 1;
553
554 case IFF_ALLMULTI | IFF_PROMISC:
555 /*
556 * Acting as a multicast router, and BPF running at the same
557 * time. Whew! (Hope this is a fast machine...)
558 */
559 #if NBPFILTER > 0
560 *to_bpf = (ifp->if_bpf != 0);
561 #endif
562 /* We want to see multicasts. */
563 if (eh->ether_dhost[0] & 1)
564 return 1;
565
566 /* We want to see our own packets */
567 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
568 return 1;
569
570 /* Anything else goes to BPF, but not to us. */
571 #if NBPFILTER > 0
572 if (*to_bpf)
573 *to_bpf = 2;
574 #endif
575 return 1;
576
577 default:
578 /*
579 * Only accept unicast packets destined for us, or multicasts
580 * for groups that we belong to. For now, we assume that the
581 * '586 will only return packets that we asked it for. This
582 * isn't strictly true (it uses hashing for the multicast
583 * filter), but it will do in this case, and we want to get
584 * out of here as quickly as possible.
585 */
586 #if NBPFILTER > 0
587 *to_bpf = (ifp->if_bpf != 0);
588 #endif
589 return 1;
590 }
591 return 0;
592 }
593
594 /*
595 * We want to isolate the bits that have meaning... This assumes that
596 * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds
597 * the size of the buffer, then we are screwed anyway.
598 */
599 static __inline int
600 ie_buflen(sc, head)
601 struct ie_softc *sc;
602 int head;
603 {
604
605 return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
606 & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
607 }
608
609
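/*
 * ie_packet_len: total the actual byte counts of the receive buffers
 * making up the frame at rbhead, stopping at the buffer marked LAST.
 * If the RBD ring is found to be inconsistent, reset the chip and
 * return -1.
 */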
610 static __inline int
611 ie_packet_len(sc)
612 struct ie_softc *sc;
613 {
614 int i;
615 int head = sc->rbhead;
616 int acc = 0;
617 int oldhead = head;
618
619 do {
620 i = SWAP(sc->rbuffs[head]->ie_rbd_actual);
621 if ((i & IE_RBD_USED) == 0) {
622 #ifdef IEDEBUG
623 print_rbd(sc->rbuffs[head]);
624 #endif
625 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
626 sc->sc_dev.dv_xname, sc->rbhead);
627 iereset(sc);
628 return -1;
629 }
630
631 i = (i & IE_RBD_LAST) != 0;
632
633 acc += ie_buflen(sc, head);
634 head = (head + 1) % sc->nrxbuf;
635 if (oldhead == head) {
636 printf("ie: packet len: looping: acc = %d (head=%d)\n",
637 acc, head);
638 iereset(sc);
639 return -1;
640 }
641 } while (!i);
642
643 return acc;
644 }
645
646 /*
647 * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
648 * command to the chip to be executed. On the way, if we have a BPF listener
649 * also give him a copy.
650 */
651 static __inline void
652 iexmit(sc)
653 struct ie_softc *sc;
654 {
655
656 #ifdef IEDEBUG
657 if (sc->sc_debug & IED_XMIT)
658 printf("%s: xmit buffer %d\n", sc->sc_dev.dv_xname,
659 sc->xctail);
660 #endif
661
662 #if NBPFILTER > 0
663 /*
664 * If BPF is listening on this interface, let it see the packet before
665 * we push it on the wire.
666 */
667 if (sc->sc_ethercom.ec_if.if_bpf)
668 bpf_tap(sc->sc_ethercom.ec_if.if_bpf,
669 sc->xmit_cbuffs[sc->xctail],
670 SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags));
671 #endif
672
673 sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= SWAP(IE_XMIT_LAST);
674 sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
675 ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
676 MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));
677
678 sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
679 sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
680 SWAP(IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST);
681
682 sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
683 sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
684 MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);
685
686 sc->scb->ie_command_list =
687 MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
688
689 command_and_wait(sc, IE_CU_START, 0, 0);
690
691 sc->sc_ethercom.ec_if.if_timer = 5;
692 }
693
694 /*
695 * Read data off the interface, and turn it into an mbuf chain.
696 *
697 * This code is DRAMATICALLY different from the previous version; this
698 * version tries to allocate the entire mbuf chain up front, given the
699 * length of the data available. This enables us to allocate mbuf
700 * clusters in many situations where before we would have had a long
701 * chain of partially-full mbufs. This should help to speed up the
702 * operation considerably. (Provided that it works, of course.)
703 */
704 struct mbuf *
705 ieget(sc, ehp, to_bpf)
706 struct ie_softc *sc;
707 struct ether_header *ehp;
708 int *to_bpf;
709 {
710 struct mbuf *top, **mp, *m;
711 int len, totlen, resid;
712 int thisrboff, thismboff;
713 int head;
714
715 totlen = ie_packet_len(sc);
716 if (totlen <= 0)
717 return 0;
718
719 head = sc->rbhead;
720
721 /*
722 * Snarf the Ethernet header.
723 */
724 bcopy((caddr_t)sc->cbuffs[head], (caddr_t)ehp, sizeof *ehp);
725
726 /*
727 * As quickly as possible, check if this packet is for us.
728 * If not, don't waste a single cycle copying the rest of the
729 * packet in.
730 * This is only a consideration when FILTER is defined; i.e., when
731 * we are either running BPF or doing multicasting.
732 */
733 if (!check_eh(sc, ehp, to_bpf)) {
734 /* Not an error; offset the if_ierrors++ our caller does on a 0 return. */
735 sc->sc_ethercom.ec_if.if_ierrors--;
736 return 0;
737 }
738
739 resid = totlen -= (thisrboff = sizeof *ehp);
740
741 MGETHDR(m, M_DONTWAIT, MT_DATA);
742 if (m == 0)
743 return 0;
744 m->m_pkthdr.rcvif = &sc->sc_ethercom.ec_if;
745 m->m_pkthdr.len = totlen;
746 len = MHLEN;
747 top = 0;
748 mp = ⊤
749
750 /*
751 * This loop goes through and allocates mbufs for all the data we will
752 * be copying in. It does not actually do the copying yet.
753 */
754 while (totlen > 0) {
755 if (top) {
756 MGET(m, M_DONTWAIT, MT_DATA);
757 if (m == 0) {
758 m_freem(top);
759 return 0;
760 }
761 len = MLEN;
762 }
763 if (totlen >= MINCLSIZE) {
764 MCLGET(m, M_DONTWAIT);
765 if ((m->m_flags & M_EXT) == 0) {
766 m_freem(top);
767 return 0;
768 }
769 len = MCLBYTES;
770 }
771 m->m_len = len = min(totlen, len);
772 totlen -= len;
773 *mp = m;
774 mp = &m->m_next;
775 }
776
777 m = top;
778 thismboff = 0;
779
780 /*
781 * Now we take the mbuf chain (hopefully only one mbuf most of the
782 * time) and stuff the data into it. There are no possible failures at
783 * or after this point.
784 */
785 while (resid > 0) {
786 int thisrblen = ie_buflen(sc, head) - thisrboff,
787 thismblen = m->m_len - thismboff;
788 len = min(thisrblen, thismblen);
789
790 bcopy((caddr_t)(sc->cbuffs[head] + thisrboff),
791 mtod(m, caddr_t) + thismboff, (u_int)len);
792 resid -= len;
793
794 if (len == thismblen) {
795 m = m->m_next;
796 thismboff = 0;
797 } else
798 thismboff += len;
799
800 if (len == thisrblen) {
801 head = (head + 1) % sc->nrxbuf;
802 thisrboff = 0;
803 } else
804 thisrboff += len;
805 }
806
807 /*
808 * Unless something changed strangely while we were doing the copy, we
809 * have now copied everything in from the shared memory.
810 * This means that we are done.
811 */
812 return top;
813 }
814
815 /*
816 * Read frame NUM from the receive frame ring.
817 *
818 * This routine reads the RFD at NUM, and copies in the buffers from the list
819 * of RBD, then rotates the RBD and RFD lists so that the receiver doesn't
820 * start complaining. Trailers are DROPPED---there's no point in wasting time
821 * on confusing code to deal with them. Hopefully, this machine will never ARP
822 * for trailers anyway.
823 */
824 static void
825 ie_readframe(sc, num)
826 struct ie_softc *sc;
827 int num; /* frame number to read */
828 {
829 int status;
830 struct mbuf *m = 0;
831 struct ether_header eh;
832 #if NBPFILTER > 0
833 int bpf_gets_it = 0;
834 #endif
835
836 status = SWAP(sc->rframes[num]->ie_fd_status);
837
838 /* Immediately advance the RFD list, since we have copied ours now. */
839 sc->rframes[num]->ie_fd_status = SWAP(0);
840 sc->rframes[num]->ie_fd_last |= SWAP(IE_FD_LAST);
841 sc->rframes[sc->rftail]->ie_fd_last &= ~SWAP(IE_FD_LAST);
842 sc->rftail = (sc->rftail + 1) % sc->nframes;
843 sc->rfhead = (sc->rfhead + 1) % sc->nframes;
844
845 if (status & IE_FD_OK) {
846 #if NBPFILTER > 0
847 m = ieget(sc, &eh, &bpf_gets_it);
848 #else
849 m = ieget(sc, &eh, 0);
850 #endif
851 ie_drop_packet_buffer(sc);
852 }
853 if (m == 0) {
854 sc->sc_ethercom.ec_if.if_ierrors++;
855 return;
856 }
857
858 #ifdef IEDEBUG
859 if (sc->sc_debug & IED_READFRAME)
860 printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
861 ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
862 #endif
863
864 #if NBPFILTER > 0
865 /*
866 * Check for a BPF filter; if so, hand it up.
867 * Note that we have to stick an extra mbuf up front, because bpf_mtap
868 * expects to have the ether header at the front.
869 * It doesn't matter that this results in an ill-formatted mbuf chain,
870 * since BPF just looks at the data. (It doesn't try to free the mbuf,
871 * tho' it will make a copy for tcpdump.)
872 */
873 if (bpf_gets_it) {
874 struct mbuf m0;
875 m0.m_len = sizeof eh;
876 m0.m_data = (caddr_t)&eh;
877 m0.m_next = m;
878
879 /* Pass it up. */
880 bpf_mtap(sc->sc_ethercom.ec_if.if_bpf, &m0);
881
882 /*
883 * A signal passed up from the filtering code indicating that
884 * the packet is intended for BPF but not for the protocol
885 * machinery. We can save a few cycles by not handing it off
886 * to them.
887 */
888 if (bpf_gets_it == 2) {
889 m_freem(m);
890 return;
891 }
892 }
893 #endif /* NBPFILTER > 0 */
894
895 /*
896 * In here there used to be code to check destination addresses upon
897 * receipt of a packet. We have deleted that code, and replaced it
898 * with code to check the address much earlier in the cycle, before
899 * copying the data in; this saves us valuable cycles when operating
900 * as a multicast router or when using BPF.
901 */
902
903 /*
904 * Finally pass this packet up to higher layers.
905 */
906 ether_input(&sc->sc_ethercom.ec_if, &eh, m);
907 sc->sc_ethercom.ec_if.if_ipackets++;
908 }
909
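/*
 * ie_drop_packet_buffer: discard the frame at rbhead, returning its
 * receive buffers to the chip and advancing the RBD head and tail
 * pointers past it. Resets the chip if the ring is out of sync.
 */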
910 static void
911 ie_drop_packet_buffer(sc)
912 struct ie_softc *sc;
913 {
914 int i;
915
916 do {
917 i = SWAP(sc->rbuffs[sc->rbhead]->ie_rbd_actual);
918 if ((i & IE_RBD_USED) == 0) {
919 /*
920 * This means we are somehow out of sync. So, we
921 * reset the adapter.
922 */
923 #ifdef IEDEBUG
924 print_rbd(sc->rbuffs[sc->rbhead]);
925 #endif
926 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
927 sc->sc_dev.dv_xname, sc->rbhead);
928 iereset(sc);
929 return;
930 }
931
932 i = (i & IE_RBD_LAST) != 0;
933
934 sc->rbuffs[sc->rbhead]->ie_rbd_length |= SWAP(IE_RBD_LAST);
935 sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
936 sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
937 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~SWAP(IE_RBD_LAST);
938 sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
939 } while (!i);
940 }
941
942
943 /*
944 * Start transmission on an interface.
945 */
946 void
947 iestart(ifp)
948 struct ifnet *ifp;
949 {
950 struct ie_softc *sc = ifp->if_softc;
951 struct mbuf *m0, *m;
952 u_char *buffer;
953 u_short len;
954
955 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
956 return;
957
958 for (;;) {
959 if (sc->xmit_busy == NTXBUF) {
960 ifp->if_flags |= IFF_OACTIVE;
961 break;
962 }
963
964 IF_DEQUEUE(&ifp->if_snd, m0);
965 if (m0 == 0)
966 break;
967
968 /* We need to use m->m_pkthdr.len, so require the header */
969 if ((m0->m_flags & M_PKTHDR) == 0)
970 panic("iestart: no header mbuf");
971
972 #if NBPFILTER > 0
973 /* Tap off here if there is a BPF listener. */
974 if (ifp->if_bpf)
975 bpf_mtap(ifp->if_bpf, m0);
976 #endif
977
978 #ifdef IEDEBUG
979 if (sc->sc_debug & IED_ENQ)
980 printf("%s: fill buffer %d\n", sc->sc_dev.dv_xname,
981 sc->xchead);
982 #endif
983
984 if (m0->m_pkthdr.len > IE_TBUF_SIZE)
985 printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);
986
987 buffer = sc->xmit_cbuffs[sc->xchead];
988 for (m = m0; m != 0; m = m->m_next) {
989 bcopy(mtod(m, caddr_t), buffer, m->m_len);
990 buffer += m->m_len;
991 }
992
993 len = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
994 m_freem(m0);
995
996 sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);
997
998 /* Start the first packet transmitting. */
999 if (sc->xmit_busy == 0)
1000 iexmit(sc);
1001
1002 sc->xchead = (sc->xchead + 1) % NTXBUF;
1003 sc->xmit_busy++;
1004 }
1005 }
1006
1007 /*
1008 * set up IE's ram space
1009 */
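/*
 * Zero the SCP, ISCP and SCB and chain them together (SCP -> ISCP ->
 * SCB), then kick the chip with a channel attention, give it a moment,
 * and check that it has cleared the ISCP busy flag. Returns 1 on
 * success, 0 if the chip does not respond.
 */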
1010 int
1011 ie_setupram(sc)
1012 struct ie_softc *sc;
1013 {
1014 volatile struct ie_sys_conf_ptr *scp;
1015 volatile struct ie_int_sys_conf_ptr *iscp;
1016 volatile struct ie_sys_ctl_block *scb;
1017 int s;
1018
1019 s = splnet();
1020
1021 scp = sc->scp;
1022 (sc->memzero)((char *) scp, sizeof *scp);
1023
1024 iscp = sc->iscp;
1025 (sc->memzero)((char *) iscp, sizeof *iscp);
1026
1027 scb = sc->scb;
1028 (sc->memzero)((char *) scb, sizeof *scb);
1029
1030 scp->ie_bus_use = 0; /* 16-bit */
1031 ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));
1032
1033 iscp->ie_busy = 1; /* ie_busy == char */
1034 iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
1035 ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));
1036
1037 if (sc->hwreset)
1038 (sc->hwreset)(sc);
1039
1040 (sc->chan_attn) (sc);
1041
1042 delay(100); /* wait a while... */
1043
1044 if (iscp->ie_busy) {
1045 splx(s);
1046 return 0;
1047 }
1048 /*
1049 * Acknowledge any interrupts we may have caused...
1050 */
1051 ie_ack(sc, IE_ST_WHENCE);
1052 splx(s);
1053
1054 return 1;
1055 }
1056
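/*
 * iereset: abort any receive and command-unit activity, then
 * reinitialize the chip via ieinit(). Called from the watchdog and
 * whenever the receive machinery gets out of sync.
 */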
1057 void
1058 iereset(sc)
1059 struct ie_softc *sc;
1060 {
1061 int s = splnet();
1062
1063 printf("%s: reset\n", sc->sc_dev.dv_xname);
1064
1065 /* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
1066 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
1067
1068 /*
1069 * Stop i82586 dead in its tracks.
1070 */
1071 if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
1072 printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);
1073
1074 if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
1075 printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);
1076
1077
1078 #if notdef
1079 if (sc->hwreset)
1080 (sc->hwreset)(sc);
1081 #endif
1082 #ifdef notdef
1083 if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
1084 panic("ie disappeared!\n");
1085 #endif
1086
1087 ieinit(sc);
1088
1089 splx(s);
1090 }
1091
1092 /*
1093 * Send a command to the controller and wait for it to either complete
1094 * or be accepted, depending on the command. If the command pointer
1095 * is null, then pretend that the command is not an action command.
1096 * If the command pointer is not null, and the command is an action
1097 * command, wait for
1098 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
1099 * to become true.
1100 */
1101 static int
1102 command_and_wait(sc, cmd, pcmd, mask)
1103 struct ie_softc *sc;
1104 int cmd; /* native byte-order */
1105 volatile void *pcmd;
1106 int mask; /* native byte-order */
1107 {
1108 volatile struct ie_cmd_common *cc = pcmd;
1109 volatile struct ie_sys_ctl_block *scb = sc->scb;
1110 int i;
1111
1112 scb->ie_command = (u_short)SWAP(cmd);
1113 (sc->chan_attn)(sc);
1114
1115 if (IE_ACTION_COMMAND(cmd) && pcmd) {
1116 /*
1117 * According to the packet driver, the minimum timeout should
1118 * be .369 seconds, which is what the loop below spins for
1119 * (369000 iterations of delay(1)).
1120
1121 /*
1122 * Now spin-lock waiting for status. This is not a very nice
1123 * thing to do, but I haven't figured out how, or indeed if, we
1124 * can put the process waiting for action to sleep. (We may
1125 * be getting called through some other timeout running in the
1126 * kernel.)
1127 */
1128 for (i = 0; i < 369000; i++) {
1129 delay(1);
1130 if ((SWAP(cc->ie_cmd_status) & mask))
1131 return (0);
1132 }
1133
1134 } else {
1135 /*
1136 * Otherwise, just wait for the command to be accepted.
1137 */
1138
1139 /* XXX spin lock; wait at most 0.1 seconds */
1140 for (i = 0; i < 100000; i++) {
1141 if (scb->ie_command == 0)
1142 return (0);
1143 delay(1);
1144 }
1145 }
1146
1147 /* Timeout */
1148 return (1);
1149 }
1150
1151 /*
1152 * Run the time-domain reflectometer.
1153 */
1154 static void
1155 run_tdr(sc, cmd)
1156 struct ie_softc *sc;
1157 struct ie_tdr_cmd *cmd;
1158 {
1159 int result;
1160
1161 cmd->com.ie_cmd_status = SWAP(0);
1162 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_TDR | IE_CMD_LAST);
1163 cmd->com.ie_cmd_link = SWAP(0xffff);
1164
1165 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1166 cmd->ie_tdr_time = SWAP(0);
1167
1168 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1169 (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0)
1170 result = 0x10000; /* XXX */
1171 else
1172 result = SWAP(cmd->ie_tdr_time);
1173
1174 ie_ack(sc, IE_ST_WHENCE);
1175
1176 if (result & IE_TDR_SUCCESS)
1177 return;
1178
1179 if (result & 0x10000)
1180 printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
1181 else if (result & IE_TDR_XCVR)
1182 printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
1183 else if (result & IE_TDR_OPEN)
1184 printf("%s: TDR detected an open %d clocks away\n",
1185 sc->sc_dev.dv_xname, result & IE_TDR_TIME);
1186 else if (result & IE_TDR_SHORT)
1187 printf("%s: TDR detected a short %d clocks away\n",
1188 sc->sc_dev.dv_xname, result & IE_TDR_TIME);
1189 else
1190 printf("%s: TDR returned unknown status %x\n",
1191 sc->sc_dev.dv_xname, result);
1192 }
1193
1194 #ifdef notdef
1195 /* ALIGN works on 8 byte boundaries.... but 4 byte boundaries are ok for sun */
1196 #define _ALLOC(p, n) (bzero(p, n), p += n, p - n)
1197 #define ALLOC(p, n) _ALLOC(p, ALIGN(n)) /* XXX convert to this? */
1198 #endif
1199
1200 /*
1201 * setup_bufs: set up the buffers
1202 *
1203 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
1204 * this is to be used for the buffers. the chip indexes its control data
1205 * structures with 16 bit offsets, and it indexes actual buffers with
1206 * 24 bit addresses. so we should allocate control buffers first so that
1207 * we don't overflow the 16 bit offset field. The number of transmit
1208 * buffers is fixed at compile time.
1209 *
1210 * note: this function was written to be easy to understand, rather than
1211 * highly efficient (it isn't in the critical path).
1212 */
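/*
 * Resulting layout of sc->buf_area (each item aligned via sc->align):
 *   NTXBUF  x struct ie_xmit_cmd          transmit command blocks
 *   nframes x struct ie_recv_frame_desc   receive frame descriptors
 *   NTXBUF  x struct ie_xmit_buf          transmit buffer descriptors
 *   nrxbuf  x struct ie_recv_buf_desc     receive buffer descriptors
 *   NTXBUF  x IE_TBUF_SIZE bytes          transmit data buffers
 *   nrxbuf  x IE_RBUF_SIZE bytes          receive data buffers
 */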
1213 static void
1214 setup_bufs(sc)
1215 struct ie_softc *sc;
1216 {
1217 caddr_t ptr = sc->buf_area; /* memory pool */
1218 int n, r;
1219
1220 /*
1221 * step 0: zero memory and figure out how many recv buffers and
1222 * frames we can have.
1223 */
1224 (sc->memzero)(ptr, sc->buf_area_sz);
1225 ptr = (sc->align)(ptr); /* set alignment and stick with it */
1226
1227 n = (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_cmd)) +
1228 (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
1229 n *= NTXBUF; /* n = total size of xmit area */
1230
1231 n = sc->buf_area_sz - n;/* n = free space for recv stuff */
1232
1233 r = (int)(sc->align)((caddr_t) sizeof(struct ie_recv_frame_desc)) +
1234 (((int)(sc->align)((caddr_t) sizeof(struct ie_recv_buf_desc)) +
1235 IE_RBUF_SIZE) * B_PER_F);
1236
1237 /* r = size of one R frame */
1238
1239 sc->nframes = n / r;
1240 if (sc->nframes <= 0)
1241 panic("ie: bogus buffer calc\n");
1242 if (sc->nframes > MAXFRAMES)
1243 sc->nframes = MAXFRAMES;
1244
1245 sc->nrxbuf = sc->nframes * B_PER_F;
1246
1247 #ifdef IEDEBUG
1248 printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
1249 #endif
1250
1251 /*
1252 * step 1a: lay out and zero frame data structures for transmit and recv
1253 */
1254 for (n = 0; n < NTXBUF; n++) {
1255 sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
1256 ptr = (sc->align)(ptr + sizeof(struct ie_xmit_cmd));
1257 }
1258
1259 for (n = 0; n < sc->nframes; n++) {
1260 sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
1261 ptr = (sc->align)(ptr + sizeof(struct ie_recv_frame_desc));
1262 }
1263
1264 /*
1265 * step 1b: link together the recv frames and set EOL on last one
1266 */
1267 for (n = 0; n < sc->nframes; n++) {
1268 sc->rframes[n]->ie_fd_next =
1269 MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
1270 }
1271 sc->rframes[sc->nframes - 1]->ie_fd_last |= SWAP(IE_FD_LAST);
1272
1273 /*
1274 * step 2a: lay out and zero frame buffer structures for xmit and recv
1275 */
1276 for (n = 0; n < NTXBUF; n++) {
1277 sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
1278 ptr = (sc->align)(ptr + sizeof(struct ie_xmit_buf));
1279 }
1280
1281 for (n = 0; n < sc->nrxbuf; n++) {
1282 sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
1283 ptr = (sc->align)(ptr + sizeof(struct ie_recv_buf_desc));
1284 }
1285
1286 /*
1287 * step 2b: link together recv bufs and set EOL on last one
1288 */
1289 for (n = 0; n < sc->nrxbuf; n++) {
1290 sc->rbuffs[n]->ie_rbd_next =
1291 MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
1292 }
1293 sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= SWAP(IE_RBD_LAST);
1294
1295 /*
1296 * step 3: allocate the actual data buffers for xmit and recv
1297 * recv buffer gets linked into recv_buf_desc list here
1298 */
1299 for (n = 0; n < NTXBUF; n++) {
1300 sc->xmit_cbuffs[n] = (u_char *) ptr;
1301 ptr = (sc->align)(ptr + IE_TBUF_SIZE);
1302 }
1303
1304 /* Pointers to last packet sent and next available transmit buffer. */
1305 sc->xchead = sc->xctail = 0;
1306
1307 /* Clear the count of transmit buffers in use. */
1308 sc->xmit_busy = 0;
1309
1310 for (n = 0; n < sc->nrxbuf; n++) {
1311 sc->cbuffs[n] = (char *) ptr; /* XXX why char vs uchar? */
1312 sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
1313 ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
1314 ptr = (sc->align)(ptr + IE_RBUF_SIZE);
1315 }
1316
1317 /*
1318 * step 4: set the head and tail pointers on receive to keep track of
1319 * the order in which RFDs and RBDs are used. link in recv frames
1320 * and buffer into the scb.
1321 */
1322
1323 sc->rfhead = 0;
1324 sc->rftail = sc->nframes - 1;
1325 sc->rbhead = 0;
1326 sc->rbtail = sc->nrxbuf - 1;
1327
1328 sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
1329 sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);
1330
1331 #ifdef IEDEBUG
1332 printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
1333 #endif
1334 }
1335
1336 /*
1337 * Run the multicast setup command.
1338 * Called at splnet().
1339 */
1340 static int
1341 mc_setup(sc, ptr)
1342 struct ie_softc *sc;
1343 void *ptr;
1344 {
1345 volatile struct ie_mcast_cmd *cmd = ptr;
1346
1347 cmd->com.ie_cmd_status = SWAP(0);
1348 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_MCAST | IE_CMD_LAST);
1349 cmd->com.ie_cmd_link = SWAP(0xffff);
1350
1351 (sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
1352 sc->mcast_count * sizeof *sc->mcast_addrs);
1353
1354 cmd->ie_mcast_bytes =
1355 SWAP(sc->mcast_count * ETHER_ADDR_LEN); /* grrr... */
1356
1357 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1358 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1359 (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
1360 printf("%s: multicast address setup command failed\n",
1361 sc->sc_dev.dv_xname);
1362 return 0;
1363 }
1364 return 1;
1365 }
1366
1367 /*
1368 * This routine takes the environment generated by check_ie_present() and adds
1369 * to it all the other structures we need to operate the adapter. This
1370 * includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, starting
1371 * the receiver unit, and clearing interrupts.
1372 *
1373 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
1374 */
1375 int
1376 ieinit(sc)
1377 struct ie_softc *sc;
1378 {
1379 volatile struct ie_sys_ctl_block *scb = sc->scb;
1380 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1381 void *ptr;
1382
1383 ptr = sc->buf_area;
1384
1385 /*
1386 * Send the configure command first.
1387 */
1388 {
1389 volatile struct ie_config_cmd *cmd = ptr;
1390
1391 scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1392 cmd->com.ie_cmd_status = SWAP(0);
1393 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_CONFIG | IE_CMD_LAST);
1394 cmd->com.ie_cmd_link = SWAP(0xffff);
1395
1396 ie_setup_config(cmd, sc->promisc, 0);
1397
1398 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1399 (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
1400 printf("%s: configure command failed\n",
1401 sc->sc_dev.dv_xname);
1402 return 0;
1403 }
1404 }
1405
1406 /*
1407 * Now send the Individual Address Setup command.
1408 */
1409 {
1410 volatile struct ie_iasetup_cmd *cmd = ptr;
1411
1412 scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1413 cmd->com.ie_cmd_status = SWAP(0);
1414 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_IASETUP | IE_CMD_LAST);
1415 cmd->com.ie_cmd_link = SWAP(0xffff);
1416
1417 (sc->memcopy)(LLADDR(ifp->if_sadl),
1418 (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);
1419
1420 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1421 (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
1422 printf("%s: individual address setup command failed\n",
1423 sc->sc_dev.dv_xname);
1424 return 0;
1425 }
1426 }
1427
1428 /*
1429 * Now run the time-domain reflectometer.
1430 */
1431 run_tdr(sc, ptr);
1432
1433 /*
1434 * Acknowledge any interrupts we have generated thus far.
1435 */
1436 ie_ack(sc, IE_ST_WHENCE);
1437
1438 /*
1439 * Set up the transmit and recv buffers.
1440 */
1441 setup_bufs(sc);
1442
1443 ifp->if_flags |= IFF_RUNNING;
1444 ifp->if_flags &= ~IFF_OACTIVE;
1445
1446 sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
1447 command_and_wait(sc, IE_RU_START, 0, 0);
1448
1449 ie_ack(sc, IE_ST_WHENCE);
1450
1451 if (sc->hwinit)
1452 (sc->hwinit)(sc);
1453
1454 return 0;
1455 }
1456
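/*
 * iestop: quiesce the interface by disabling the receive unit.
 */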
1457 static void
1458 iestop(sc)
1459 struct ie_softc *sc;
1460 {
1461
1462 command_and_wait(sc, IE_RU_DISABLE, 0, 0);
1463 }
1464
1465 int
1466 ieioctl(ifp, cmd, data)
1467 register struct ifnet *ifp;
1468 u_long cmd;
1469 caddr_t data;
1470 {
1471 struct ie_softc *sc = ifp->if_softc;
1472 struct ifaddr *ifa = (struct ifaddr *)data;
1473 struct ifreq *ifr = (struct ifreq *)data;
1474 int s, error = 0;
1475
1476 s = splnet();
1477
1478 switch(cmd) {
1479
1480 case SIOCSIFADDR:
1481 ifp->if_flags |= IFF_UP;
1482
1483 switch(ifa->ifa_addr->sa_family) {
1484 #ifdef INET
1485 case AF_INET:
1486 ieinit(sc);
1487 arp_ifinit(ifp, ifa);
1488 break;
1489 #endif
1490 #ifdef NS
1491 /* XXX - This code is probably wrong. */
1492 case AF_NS:
1493 {
1494 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1495
1496 if (ns_nullhost(*ina))
1497 ina->x_host =
1498 *(union ns_host *)LLADDR(ifp->if_sadl);
1499 else
1500 bcopy(ina->x_host.c_host,
1501 LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
1502 /* Set new address. */
1503 ieinit(sc);
1504 break;
1505 }
1506 #endif /* NS */
1507 default:
1508 ieinit(sc);
1509 break;
1510 }
1511 break;
1512
1513 case SIOCSIFFLAGS:
1514 sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
1515 if ((ifp->if_flags & IFF_UP) == 0 &&
1516 (ifp->if_flags & IFF_RUNNING) != 0) {
1517 /*
1518 * If interface is marked down and it is running, then
1519 * stop it.
1520 */
1521 iestop(sc);
1522 ifp->if_flags &= ~IFF_RUNNING;
1523 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1524 (ifp->if_flags & IFF_RUNNING) == 0) {
1525 /*
1526 * If interface is marked up and it is stopped, then
1527 * start it.
1528 */
1529 ieinit(sc);
1530 } else {
1531 /*
1532 * Reset the interface to pick up changes in any other
1533 * flags that affect hardware registers.
1534 */
1535 iestop(sc);
1536 ieinit(sc);
1537 }
1538 #ifdef IEDEBUG
1539 if (ifp->if_flags & IFF_DEBUG)
1540 sc->sc_debug = IED_ALL;
1541 else
1542 sc->sc_debug = 0;
1543 #endif
1544 break;
1545
1546 case SIOCADDMULTI:
1547 case SIOCDELMULTI:
1548 error = (cmd == SIOCADDMULTI) ?
1549 ether_addmulti(ifr, &sc->sc_ethercom):
1550 ether_delmulti(ifr, &sc->sc_ethercom);
1551
1552 if (error == ENETRESET) {
1553 /*
1554 * Multicast list has changed; set the hardware filter
1555 * accordingly.
1556 */
1557 mc_reset(sc);
1558 error = 0;
1559 }
1560 break;
1561
1562 default:
1563 error = EINVAL;
1564 }
1565 splx(s);
1566 return error;
1567 }
1568
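/*
 * mc_reset: rebuild the driver's multicast address list from the
 * ethercom multicast chain, falling back to IFF_ALLMULTI if the list
 * overflows or contains an address range. In either case, flag that
 * an MC-SETUP command must be run (done from ietint()).
 */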
1569 static void
1570 mc_reset(sc)
1571 struct ie_softc *sc;
1572 {
1573 struct ether_multi *enm;
1574 struct ether_multistep step;
1575
1576 /*
1577 * Step through the list of addresses.
1578 */
1579 sc->mcast_count = 0;
1580 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1581 while (enm) {
1582 if (sc->mcast_count >= MAXMCAST ||
1583 bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
1584 sc->sc_ethercom.ec_if.if_flags |= IFF_ALLMULTI;
1585 ieioctl(&sc->sc_ethercom.ec_if, SIOCSIFFLAGS, (void *)0);
1586 goto setflag;
1587 }
1588
1589 bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
1590 sc->mcast_count++;
1591 ETHER_NEXT_MULTI(step, enm);
1592 }
1593 setflag:
1594 sc->want_mcsetup = 1;
1595 }
1596
1597 #ifdef IEDEBUG
1598 void
1599 print_rbd(rbd)
1600 volatile struct ie_recv_buf_desc *rbd;
1601 {
1602 u_long bufval;
1603
1604 bcopy((char *)&rbd->ie_rbd_buffer, &bufval, 4); /*XXX*/
1605
1606 printf("RBD at %08lx:\nactual %04x, next %04x, buffer %lx\n"
1607 "length %04x, mbz %04x\n", (u_long)rbd,
1608 SWAP(rbd->ie_rbd_actual),
1609 SWAP(rbd->ie_rbd_next),
1610 bufval,
1611 SWAP(rbd->ie_rbd_length),
1612 rbd->mbz);
1613 }
1614 #endif
1615