i82586.c revision 1.1 1 /* $NetBSD: i82586.c,v 1.1 1997/07/22 23:32:01 pk Exp $ */
2
3 /*-
4 * Copyright (c) 1997 Paul Kranenburg.
5 * Copyright (c) 1993, 1994, 1995 Charles Hannum.
6 * Copyright (c) 1992, 1993, University of Vermont and State
7 * Agricultural College.
8 * Copyright (c) 1992, 1993, Garrett A. Wollman.
9 *
10 * Portions:
11 * Copyright (c) 1994, 1995, Rafal K. Boni
12 * Copyright (c) 1990, 1991, William F. Jolitz
13 * Copyright (c) 1990, The Regents of the University of California
14 *
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 * must display the following acknowledgement:
27 * This product includes software developed by Charles Hannum, by the
28 * University of Vermont and State Agricultural College and Garrett A.
29 * Wollman, by William F. Jolitz, and by the University of California,
30 * Berkeley, Lawrence Berkeley Laboratory, and its contributors.
31 * 4. Neither the names of the Universities nor the names of the authors
32 * may be used to endorse or promote products derived from this software
33 * without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 */
47
48 /*
49 * Intel 82586 Ethernet chip
50 * Register, bit, and structure definitions.
51 *
52 * Original StarLAN driver written by Garrett Wollman with reference to the
53 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
54 *
55 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
56 *
57 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
58 *
59 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
60 *
61 * Converted to SUN ie driver by Charles D. Cranor,
62 * October 1994, January 1995.
63 * This sun version based on i386 version 1.30.
64 */
65
66 /*
67 * The i82586 is a very painful chip, found in sun3's, sun-4/100's
68 * sun-4/200's, and VME based suns. The byte order is all wrong for a
69 * SUN, making life difficult. Programming this chip is mostly the same,
70 * but certain details differ from system to system. This driver is
71 * written so that different "ie" interfaces can be controlled by the same
72 * driver.
73 */
74
75 /*
76 Mode of operation:
77
78 We run the 82586 in a standard Ethernet mode. We keep NFRAMES
79 received frame descriptors around for the receiver to use, and
80 NRXBUF associated receive buffer descriptors, both in a circular
81 list. Whenever a frame is received, we rotate both lists as
82 necessary. (The 586 treats both lists as a simple queue.) We also
83 keep a transmit command around so that packets can be sent off
84 quickly.
85
86 We configure the adapter in AL-LOC = 1 mode, which means that the
87 Ethernet/802.3 MAC header is placed at the beginning of the receive
88 buffer rather than being split off into various fields in the RFD.
89 This also means that we must include this header in the transmit
90 buffer as well.
91
92 By convention, all transmit commands, and only transmit commands,
93 shall have the I (IE_CMD_INTR) bit set in the command. This way,
94 when an interrupt arrives at ieintr(), it is immediately possible
95 to tell what precisely caused it. ANY OTHER command-sending
96 routines should run at splnet(), and should post an acknowledgement
97 to every interrupt they generate.
98
99 */
100
101 #include "bpfilter.h"
102
103 #include <sys/param.h>
104 #include <sys/systm.h>
105 #include <sys/mbuf.h>
106 #include <sys/buf.h>
107 #include <sys/protosw.h>
108 #include <sys/socket.h>
109 #include <sys/ioctl.h>
110 #include <sys/errno.h>
111 #include <sys/syslog.h>
112 #include <sys/device.h>
113
114 #include <net/if.h>
115 #include <net/if_types.h>
116 #include <net/if_dl.h>
117 #include <net/if_ether.h>
118
119 #if NBPFILTER > 0
120 #include <net/bpf.h>
121 #include <net/bpfdesc.h>
122 #endif
123
124 #ifdef INET
125 #include <netinet/in.h>
126 #include <netinet/in_systm.h>
127 #include <netinet/in_var.h>
128 #include <netinet/ip.h>
129 #include <netinet/if_inarp.h>
130 #endif
131
132 #ifdef NS
133 #include <netns/ns.h>
134 #include <netns/ns_if.h>
135 #endif
136
137 #include <dev/ic/i82586reg.h>
138 #include <dev/ic/i82586var.h>
139
140 static struct mbuf *last_not_for_us;
141
142 void iewatchdog __P((struct ifnet *));
143 int ieinit __P((struct ie_softc *));
144 int ieioctl __P((struct ifnet *, u_long, caddr_t));
145 void iestart __P((struct ifnet *));
146 void iereset __P((struct ie_softc *));
147 static void ie_readframe __P((struct ie_softc *, int));
148 static void ie_drop_packet_buffer __P((struct ie_softc *));
149 int ie_setupram __P((struct ie_softc *));
150 static int command_and_wait __P((struct ie_softc *, int,
151 void volatile *, int));
152 /*static*/ void ierint __P((struct ie_softc *));
153 /*static*/ void ietint __P((struct ie_softc *));
154 static int ieget __P((struct ie_softc *, struct mbuf **,
155 struct ether_header *, int *));
156 static void setup_bufs __P((struct ie_softc *));
157 static int mc_setup __P((struct ie_softc *, void *));
158 static void mc_reset __P((struct ie_softc *));
159 static __inline int ether_equal __P((u_char *, u_char *));
160 static __inline void ie_ack __P((struct ie_softc *, u_int));
161 static __inline void ie_setup_config __P((volatile struct ie_config_cmd *,
162 int, int));
163 static __inline int check_eh __P((struct ie_softc *, struct ether_header *,
164 int *));
165 static __inline int ie_buflen __P((struct ie_softc *, int));
166 static __inline int ie_packet_len __P((struct ie_softc *));
167 static __inline void iexmit __P((struct ie_softc *));
168
169 static void run_tdr __P((struct ie_softc *, struct ie_tdr_cmd *));
170 static void iestop __P((struct ie_softc *));
171
172 #ifdef IEDEBUG
173 void print_rbd __P((volatile struct ie_recv_buf_desc *));
174
175 int in_ierint = 0;
176 int in_ietint = 0;
177 #endif
178
179 struct cfdriver ie_cd = {
180 NULL, "ie", DV_IFNET
181 };
182
183 /*
184 * Address generation macros:
185 * MK_24 = KVA -> 24 bit address in native byte order
186 * MK_16 = KVA -> 16 bit address in INTEL byte order
187 * ST_24 = store a 24 bit address in native byte order to INTEL byte order
188 */
189 #define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))
190
191 #if BYTE_ORDER == BIG_ENDIAN
192 #define XSWAP(y) ( ((y) >> 8) | ((y) << 8) )
193 #define SWAP(x) ({u_short _z=(x); (u_short)XSWAP(_z);})
194
195 #define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
196 #define ST_24(to, from) { \
197 u_long fval = (u_long)(from); \
198 u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
199 t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0] ;*/ \
200 }
201 #else
202 #define SWAP(x) x
203 #define MK_16(base, ptr) ((u_short)(u_long)MK_24(base, ptr))
204 #define ST_24(to, from) {to = (from);}
205 #endif
206
207 /*
208 * Here are a few useful functions. We could have done these as macros, but
209 * since we have the inline facility, it makes sense to use that instead.
210 */
211 static __inline void
212 ie_setup_config(cmd, promiscuous, manchester)
213 volatile struct ie_config_cmd *cmd;
214 int promiscuous, manchester;
215 {
216
217 cmd->ie_config_count = 0x0c;
218 cmd->ie_fifo = 8;
219 cmd->ie_save_bad = 0x40;
220 cmd->ie_addr_len = 0x2e;
221 cmd->ie_priority = 0;
222 cmd->ie_ifs = 0x60;
223 cmd->ie_slot_low = 0;
224 cmd->ie_slot_high = 0xf2;
225 cmd->ie_promisc = !!promiscuous | manchester << 2;
226 cmd->ie_crs_cdt = 0;
227 cmd->ie_min_len = 64;
228 cmd->ie_junk = 0xff;
229 }
230
231 static __inline void
232 ie_ack(sc, mask)
233 struct ie_softc *sc;
234 u_int mask; /* in native byte-order */
235 {
236 volatile struct ie_sys_ctl_block *scb = sc->scb;
237
238 command_and_wait(sc, SWAP(scb->ie_status) & mask, 0, 0);
239 }
240
241
242 /*
243 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
244 */
245 void
246 ie_attach(sc, name, etheraddr)
247 struct ie_softc *sc;
248 char *name;
249 u_int8_t *etheraddr;
250 {
251 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
252
253 if (ie_setupram(sc) == 0) { /* XXX - ISA version? */
254 printf(": RAM CONFIG FAILED!\n");
255 /* XXX should reclaim resources? */
256 return;
257 }
258
259 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
260 ifp->if_softc = sc;
261 ifp->if_start = iestart;
262 ifp->if_ioctl = ieioctl;
263 ifp->if_watchdog = iewatchdog;
264 ifp->if_flags =
265 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
266
267 /* Attach the interface. */
268 if_attach(ifp);
269 ether_ifattach(ifp, etheraddr);
270
271 printf(" address %s, type %s\n", ether_sprintf(etheraddr), name);
272
273 #if NBPFILTER > 0
274 bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
275 #endif
276 }
277
278
279 /*
280 * Device timeout/watchdog routine. Entered if the device neglects to generate
281 * an interrupt after a transmit has been started on it.
282 */
283 void
284 iewatchdog(ifp)
285 struct ifnet *ifp;
286 {
287 struct ie_softc *sc = ifp->if_softc;
288
289 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
290 ++ifp->if_oerrors;
291
292 iereset(sc);
293 }
294
295 /*
296 * What to do upon receipt of an interrupt.
297 */
298 int
299 ieintr(v)
300 void *v;
301 {
302 struct ie_softc *sc = v;
303 register u_short status;
304
305 status = SWAP(sc->scb->ie_status);
306
307 /*
308 * Implementation dependent interrupt handling.
309 */
310 if (sc->intrhook)
311 (*sc->intrhook)(sc);
312
313 loop:
314 /* Ack interrupts FIRST in case we receive more during the ISR. */
315 ie_ack(sc, IE_ST_WHENCE & status);
316
317 if (status & (IE_ST_FR | IE_ST_RNR)) {
318 #ifdef IEDEBUG
319 in_ierint++;
320 if (sc->sc_debug & IED_RINT)
321 printf("%s: rint\n", sc->sc_dev.dv_xname);
322 #endif
323 ierint(sc);
324 #ifdef IEDEBUG
325 in_ierint--;
326 #endif
327 }
328
329 if (status & IE_ST_CX) {
330 #ifdef IEDEBUG
331 in_ietint++;
332 if (sc->sc_debug & IED_TINT)
333 printf("%s: tint\n", sc->sc_dev.dv_xname);
334 #endif
335 ietint(sc);
336 #ifdef IEDEBUG
337 in_ietint--;
338 #endif
339 }
340
341 if (status & IE_ST_RNR) {
342 printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
343 sc->sc_ethercom.ec_if.if_ierrors++;
344 iereset(sc);
345 return (1);
346 }
347
348 #ifdef IEDEBUG
349 if ((status & IE_ST_CNA) && (sc->sc_debug & IED_CNA))
350 printf("%s: cna\n", sc->sc_dev.dv_xname);
351 #endif
352
353 status = SWAP(sc->scb->ie_status);
354 if (status & IE_ST_WHENCE)
355 goto loop;
356
357 return (1);
358 }
359
360 /*
361 * Process a received-frame interrupt.
362 */
363 void
364 ierint(sc)
365 struct ie_softc *sc;
366 {
367 volatile struct ie_sys_ctl_block *scb = sc->scb;
368 int i, status;
369 static int timesthru = 1024;
370
371 i = sc->rfhead;
372 for (;;) {
373 status = SWAP(sc->rframes[i]->ie_fd_status);
374
375 if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
376 sc->sc_ethercom.ec_if.if_ipackets++;
377 if (!--timesthru) {
378 sc->sc_ethercom.ec_if.if_ierrors +=
379 SWAP(scb->ie_err_crc) +
380 SWAP(scb->ie_err_align) +
381 SWAP(scb->ie_err_resource) +
382 SWAP(scb->ie_err_overrun);
383 scb->ie_err_crc = scb->ie_err_align =
384 scb->ie_err_resource = scb->ie_err_overrun =
385 SWAP(0);
386 timesthru = 1024;
387 }
388 ie_readframe(sc, i);
389 } else {
390 if ((status & IE_FD_RNR) != 0 &&
391 (SWAP(scb->ie_status) & IE_RU_READY) == 0) {
392 sc->rframes[0]->ie_fd_buf_desc =
393 MK_16(sc->sc_maddr, sc->rbuffs[0]);
394 scb->ie_recv_list =
395 MK_16(sc->sc_maddr, sc->rframes[0]);
396 command_and_wait(sc, IE_RU_START, 0, 0);
397 }
398 break;
399 }
400 i = (i + 1) % sc->nframes;
401 }
402 }
403
404 /*
405 * Process a command-complete interrupt. These are only generated by the
406 * transmission of frames. This routine is deceptively simple, since most of
407 * the real work is done by iestart().
408 */
409 void
410 ietint(sc)
411 struct ie_softc *sc;
412 {
413 int status;
414
415 sc->sc_ethercom.ec_if.if_timer = 0;
416 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
417
418 status = SWAP(sc->xmit_cmds[sc->xctail]->ie_xmit_status);
419
420 if (!(status & IE_STAT_COMPL) || (status & IE_STAT_BUSY))
421 printf("ietint: command still busy!\n");
422
423 if (status & IE_STAT_OK) {
424 sc->sc_ethercom.ec_if.if_opackets++;
425 sc->sc_ethercom.ec_if.if_collisions += (status & IE_XS_MAXCOLL);
426 } else if (status & IE_STAT_ABORT) {
427 printf("%s: send aborted\n", sc->sc_dev.dv_xname);
428 sc->sc_ethercom.ec_if.if_oerrors++;
429 } else if (status & IE_XS_NOCARRIER) {
430 printf("%s: no carrier\n", sc->sc_dev.dv_xname);
431 sc->sc_ethercom.ec_if.if_oerrors++;
432 } else if (status & IE_XS_LOSTCTS) {
433 printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
434 sc->sc_ethercom.ec_if.if_oerrors++;
435 } else if (status & IE_XS_UNDERRUN) {
436 printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
437 sc->sc_ethercom.ec_if.if_oerrors++;
438 } else if (status & IE_XS_EXCMAX) {
439 printf("%s: too many collisions\n", sc->sc_dev.dv_xname);
440 sc->sc_ethercom.ec_if.if_collisions += 16;
441 sc->sc_ethercom.ec_if.if_oerrors++;
442 }
443
444 /*
445 * If multicast addresses were added or deleted while transmitting,
446 * mc_reset() set the want_mcsetup flag indicating that we should do
447 * it.
448 */
449 if (sc->want_mcsetup) {
450 mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
451 sc->want_mcsetup = 0;
452 }
453
454 /* Done with the buffer. */
455 sc->xmit_free++;
456 sc->xmit_busy = 0;
457 sc->xctail = (sc->xctail + 1) % NTXBUF;
458
459 iestart(&sc->sc_ethercom.ec_if);
460 }
461
462 /*
463 * Compare two Ether/802 addresses for equality, inlined and unrolled for
464 * speed. I'd love to have an inline assembler version of this...
465 */
466 static __inline int
467 ether_equal(one, two)
468 u_char *one, *two;
469 {
470
471 if (one[5] != two[5] || one[4] != two[4] || one[3] != two[3] ||
472 one[2] != two[2] || one[1] != two[1] || one[0] != two[0])
473 return 0;
474 return 1;
475 }
476
477 /*
478 * Check for a valid address. to_bpf is filled in with one of the following:
479 * 0 -> BPF doesn't get this packet
480 * 1 -> BPF does get this packet
481 * 2 -> BPF does get this packet, but we don't
482 * Return value is true if the packet is for us, and false otherwise.
483 *
484 * This routine is a mess, but it's also critical that it be as fast
485 * as possible. It could be made cleaner if we can assume that the
486 * only client which will fiddle with IFF_PROMISC is BPF. This is
487 * probably a good assumption, but we do not make it here. (Yet.)
488 */
489 static __inline int
490 check_eh(sc, eh, to_bpf)
491 struct ie_softc *sc;
492 struct ether_header *eh;
493 int *to_bpf;
494 {
495 struct ifnet *ifp;
496 int i;
497
498 ifp = &sc->sc_ethercom.ec_if;
499
500 switch(sc->promisc) {
501 case IFF_ALLMULTI:
502 /*
503 * Receiving all multicasts, but no unicasts except those
504 * destined for us.
505 */
506 #if NBPFILTER > 0
507 /* BPF gets this packet if anybody cares */
508 *to_bpf = (sc->sc_ethercom.ec_if.if_bpf != 0);
509 #endif
510 if (eh->ether_dhost[0] & 1)
511 return 1;
512 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
513 return 1;
514 return 0;
515
516 case IFF_PROMISC:
517 /*
518 * Receiving all packets. These need to be passed on to BPF.
519 */
520 #if NBPFILTER > 0
521 *to_bpf = (sc->sc_ethercom.ec_if.if_bpf != 0);
522 #endif
523 /* If for us, accept and hand up to BPF */
524 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
525 return 1;
526
527 #if NBPFILTER > 0
528 if (*to_bpf)
529 *to_bpf = 2; /* we don't need to see it */
530 #endif
531
532 /*
533 * Not a multicast, so BPF wants to see it but we don't.
534 */
535 if (!(eh->ether_dhost[0] & 1))
536 return 1;
537
538 /*
539 * If it's one of our multicast groups, accept it and pass it
540 * up.
541 */
542 for (i = 0; i < sc->mcast_count; i++) {
543 if (ether_equal(eh->ether_dhost,
544 (u_char *)&sc->mcast_addrs[i])) {
545 #if NBPFILTER > 0
546 if (*to_bpf)
547 *to_bpf = 1;
548 #endif
549 return 1;
550 }
551 }
552 return 1;
553
554 case IFF_ALLMULTI | IFF_PROMISC:
555 /*
556 * Acting as a multicast router, and BPF running at the same
557 * time. Whew! (Hope this is a fast machine...)
558 */
559 #if NBPFILTER > 0
560 *to_bpf = (sc->sc_ethercom.ec_if.if_bpf != 0);
561 #endif
562 /* We want to see multicasts. */
563 if (eh->ether_dhost[0] & 1)
564 return 1;
565
566 /* We want to see our own packets */
567 if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
568 return 1;
569
570 /* Anything else goes to BPF but nothing else. */
571 #if NBPFILTER > 0
572 if (*to_bpf)
573 *to_bpf = 2;
574 #endif
575 return 1;
576
577 default:
578 /*
579 * Only accept unicast packets destined for us, or multicasts
580 * for groups that we belong to. For now, we assume that the
581 * '586 will only return packets that we asked it for. This
582 * isn't strictly true (it uses hashing for the multicast
583 * filter), but it will do in this case, and we want to get
584 * out of here as quickly as possible.
585 */
586 #if NBPFILTER > 0
587 *to_bpf = (sc->sc_ethercom.ec_if.if_bpf != 0);
588 #endif
589 return 1;
590 }
591 return 0;
592 }
593
594 /*
595 * We want to isolate the bits that have meaning... This assumes that
596 * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds
597 * the size of the buffer, then we are screwed anyway.
598 */
599 static __inline int
600 ie_buflen(sc, head)
601 struct ie_softc *sc;
602 int head;
603 {
604
605 return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
606 & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
607 }
608
609 static __inline int
610 ie_packet_len(sc)
611 struct ie_softc *sc;
612 {
613 int i;
614 int head = sc->rbhead;
615 int acc = 0;
616 int oldhead = head;
617
618 do {
619 if ((SWAP(sc->rbuffs[sc->rbhead]->ie_rbd_actual) & IE_RBD_USED)
620 == 0) {
621 #ifdef IEDEBUG
622 print_rbd(sc->rbuffs[sc->rbhead]);
623 #endif
624 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
625 sc->sc_dev.dv_xname, sc->rbhead);
626 iereset(sc);
627 return -1;
628 }
629
630 i = (SWAP(sc->rbuffs[head]->ie_rbd_actual) & IE_RBD_LAST) != 0;
631
632 acc += ie_buflen(sc, head);
633 head = (head + 1) % sc->nrxbuf;
634 if (oldhead == head){
635 printf("ie: packet len: looping: acc = %d (head=%d)\n",
636 acc, head);
637 iereset(sc);
638 return -1;
639 }
640 } while (!i);
641
642 return acc;
643 }
644
645 /*
646 * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
647 * command to the chip to be executed. On the way, if we have a BPF listener
648 * also give him a copy.
649 */
650 static __inline void
651 iexmit(sc)
652 struct ie_softc *sc;
653 {
654
655 #if NBPFILTER > 0
656 /*
657 * If BPF is listening on this interface, let it see the packet before
658 * we push it on the wire.
659 */
660 if (sc->sc_ethercom.ec_if.if_bpf)
661 bpf_tap(sc->sc_ethercom.ec_if.if_bpf,
662 sc->xmit_cbuffs[sc->xctail],
663 SWAP(sc->xmit_buffs[sc->xctail]->ie_xmit_flags));
664 #endif
665
666 sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= SWAP(IE_XMIT_LAST);
667 sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
668 ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
669 MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));
670
671 sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
672 sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
673 SWAP(IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST);
674
675 sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
676 sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
677 MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);
678
679 sc->scb->ie_command_list =
680 MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
681
682 command_and_wait(sc, IE_CU_START, 0, 0);
683
684 sc->xmit_busy = 1;
685 sc->sc_ethercom.ec_if.if_timer = 5;
686 }
687
688 /*
689 * Read data off the interface, and turn it into an mbuf chain.
690 *
691 * This code is DRAMATICALLY different from the previous version; this
692 * version tries to allocate the entire mbuf chain up front, given the
693 * length of the data available. This enables us to allocate mbuf
694 * clusters in many situations where before we would have had a long
695 * chain of partially-full mbufs. This should help to speed up the
696 * operation considerably. (Provided that it works, of course.)
697 */
698 static __inline int
699 ieget(sc, mp, ehp, to_bpf)
700 struct ie_softc *sc;
701 struct mbuf **mp;
702 struct ether_header *ehp;
703 int *to_bpf;
704 {
705 struct mbuf *m, *top, **mymp;
706 int i;
707 int offset;
708 int totlen, resid;
709 int thismboff;
710 int head;
711
712 totlen = ie_packet_len(sc);
713 if (totlen <= 0)
714 return -1;
715
716 i = sc->rbhead;
717
718 /*
719 * Snarf the Ethernet header.
720 */
721 (sc->memcopy)((caddr_t)sc->cbuffs[i], (caddr_t)ehp, sizeof *ehp);
722
723 /*
724 * As quickly as possible, check if this packet is for us.
725 * If not, don't waste a single cycle copying the rest of the
726 * packet in.
727 * This is only a consideration when FILTER is defined; i.e., when
728 * we are either running BPF or doing multicasting.
729 */
730 if (!check_eh(sc, ehp, to_bpf)) {
731 ie_drop_packet_buffer(sc);
732
733 /* just this case, it's not an error */
734 sc->sc_ethercom.ec_if.if_ierrors--;
735 return -1;
736 }
737 totlen -= (offset = sizeof *ehp);
738
739 MGETHDR(*mp, M_DONTWAIT, MT_DATA);
740 if (!*mp) {
741 ie_drop_packet_buffer(sc);
742 return -1;
743 }
744
745 m = *mp;
746 m->m_pkthdr.rcvif = &sc->sc_ethercom.ec_if;
747 m->m_len = MHLEN;
748 resid = m->m_pkthdr.len = totlen;
749 top = 0;
750 mymp = ⊤
751
752 /*
753 * This loop goes through and allocates mbufs for all the data we will
754 * be copying in. It does not actually do the copying yet.
755 */
756 do { /* while (resid > 0) */
757 /*
758 * Try to allocate an mbuf to hold the data that we have. If
759 * we already allocated one, just get another one and stick it
760 * on the end (eventually). If we don't already have one, try
761 * to allocate an mbuf cluster big enough to hold the whole
762 * packet, if we think it's reasonable, or a single mbuf which
763 * may or may not be big enough.
764 * Got that?
765 */
766 if (top) {
767 MGET(m, M_DONTWAIT, MT_DATA);
768 if (!m) {
769 m_freem(top);
770 ie_drop_packet_buffer(sc);
771 return -1;
772 }
773 m->m_len = MLEN;
774 }
775
776 if (resid >= MINCLSIZE) {
777 MCLGET(m, M_DONTWAIT);
778 if (m->m_flags & M_EXT)
779 m->m_len = min(resid, MCLBYTES);
780 } else {
781 if (resid < m->m_len) {
782 if (!top && resid + max_linkhdr <= m->m_len)
783 m->m_data += max_linkhdr;
784 m->m_len = resid;
785 }
786 }
787 resid -= m->m_len;
788 *mymp = m;
789 mymp = &m->m_next;
790 } while (resid > 0);
791
792 resid = totlen;
793 m = top;
794 thismboff = 0;
795 head = sc->rbhead;
796
797 /*
798 * Now we take the mbuf chain (hopefully only one mbuf most of the
799 * time) and stuff the data into it. There are no possible failures
800 * at or after this point.
801 */
802 while (resid > 0) { /* while there's stuff left */
803 int thislen = ie_buflen(sc, head) - offset;
804
805 /*
806 * If too much data for the current mbuf, then fill the current
807 * one up, go to the next one, and try again.
808 */
809 if (thislen > m->m_len - thismboff) {
810 int newlen = m->m_len - thismboff;
811 (sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
812 mtod(m, caddr_t) + thismboff, (u_int)newlen);
813 m = m->m_next;
814 thismboff = 0; /* new mbuf, so no offset */
815 offset += newlen; /* we are now this far
816 into the packet */
817 resid -= newlen; /* so there is this much
818 left to get */
819 continue;
820 }
821
822 /*
823 * If there is more than enough space in the mbuf to hold the
824 * contents of this buffer, copy everything in, advance
825 * pointers and so on.
826 */
827 if (thislen < m->m_len - thismboff) {
828 (sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
829 mtod(m, caddr_t) + thismboff, (u_int)thislen);
830 thismboff += thislen; /* we are this far into the mbuf */
831 resid -= thislen; /* and this much is left */
832 goto nextbuf;
833 }
834
835 /*
836 * Otherwise, there is exactly enough space to put this
837 * buffer's contents into the current mbuf. Do the combination
838 * of the above actions.
839 */
840 (sc->memcopy)((caddr_t)(sc->cbuffs[head] + offset),
841 mtod(m, caddr_t) + thismboff, (u_int)thislen);
842 m = m->m_next;
843 thismboff = 0; /* new mbuf, start at the beginning */
844 resid -= thislen; /* and we are this far through */
845
846 /*
847 * Advance all the pointers. We can get here from either of
848 * the last two cases, but never the first.
849 */
850 nextbuf:
851 offset = 0;
852 sc->rbuffs[head]->ie_rbd_actual = SWAP(0);
853 sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
854 sc->rbhead = head = (head + 1) % sc->nrxbuf;
855 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
856 sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
857 }
858
859 /*
860 * Unless something changed strangely while we were doing the copy, we
861 * have now copied everything in from the shared memory.
862 * This means that we are done.
863 */
864 return 0;
865 }
866
867 /*
868 * Read frame NUM from unit UNIT (pre-cached as IE).
869 *
870 * This routine reads the RFD at NUM, and copies in the buffers from the list
871 * of RBD, then rotates the RBD and RFD lists so that the receiver doesn't
872 * start complaining. Trailers are DROPPED---there's no point in wasting time
873 * on confusing code to deal with them. Hopefully, this machine will never ARP
874 * for trailers anyway.
875 */
876 static void
877 ie_readframe(sc, num)
878 struct ie_softc *sc;
879 int num; /* frame number to read */
880 {
881 int status;
882 struct mbuf *m = 0;
883 struct ether_header eh;
884 #if NBPFILTER > 0
885 int bpf_gets_it = 0;
886 #endif
887
888 status = SWAP(sc->rframes[num]->ie_fd_status);
889
890 /* Immediately advance the RFD list, since we have copied ours now. */
891 sc->rframes[num]->ie_fd_status = SWAP(0);
892 sc->rframes[num]->ie_fd_last |= SWAP(IE_FD_LAST);
893 sc->rframes[sc->rftail]->ie_fd_last &= ~SWAP(IE_FD_LAST);
894 sc->rftail = (sc->rftail + 1) % sc->nframes;
895 sc->rfhead = (sc->rfhead + 1) % sc->nframes;
896
897 if (status & IE_FD_OK) {
898 #if NBPFILTER > 0
899 if (ieget(sc, &m, &eh, &bpf_gets_it)) {
900 #else
901 if (ieget(sc, &m, &eh, 0)) {
902 #endif
903 sc->sc_ethercom.ec_if.if_ierrors++;
904 return;
905 }
906 }
907
908 #ifdef IEDEBUG
909 if (sc->sc_debug & IED_READFRAME)
910 printf("%s: frame from ether %s type %x\n", sc->sc_dev.dv_xname,
911 ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
912 #endif
913
914 if (!m)
915 return;
916
917 if (last_not_for_us) {
918 m_freem(last_not_for_us);
919 last_not_for_us = 0;
920 }
921
922 #if NBPFILTER > 0
923 /*
924 * Check for a BPF filter; if so, hand it up.
925 * Note that we have to stick an extra mbuf up front, because bpf_mtap
926 * expects to have the ether header at the front.
927 * It doesn't matter that this results in an ill-formatted mbuf chain,
928 * since BPF just looks at the data. (It doesn't try to free the mbuf,
929 * tho' it will make a copy for tcpdump.)
930 */
931 if (bpf_gets_it) {
932 struct mbuf m0;
933 m0.m_len = sizeof eh;
934 m0.m_data = (caddr_t)&eh;
935 m0.m_next = m;
936
937 /* Pass it up. */
938 bpf_mtap(sc->sc_ethercom.ec_if.if_bpf, &m0);
939 }
940 /*
941 * A signal passed up from the filtering code indicating that the
942 * packet is intended for BPF but not for the protocol machinery.
943 * We can save a few cycles by not handing it off to them.
944 */
945 if (bpf_gets_it == 2) {
946 last_not_for_us = m;
947 return;
948 }
949 #endif /* NBPFILTER > 0 */
950
951 /*
952 * In here there used to be code to check destination addresses upon
953 * receipt of a packet. We have deleted that code, and replaced it
954 * with code to check the address much earlier in the cycle, before
955 * copying the data in; this saves us valuable cycles when operating
956 * as a multicast router or when using BPF.
957 */
958
959 /*
960 * Finally pass this packet up to higher layers.
961 */
962 ether_input(&sc->sc_ethercom.ec_if, &eh, m);
963 }
964
965 static void
966 ie_drop_packet_buffer(sc)
967 struct ie_softc *sc;
968 {
969 int i;
970
971 do {
972 /*
973 * This means we are somehow out of sync. So, we reset the
974 * adapter.
975 */
976 i = SWAP(sc->rbuffs[sc->rbhead]->ie_rbd_actual);
977 if ((i & IE_RBD_USED) == 0) {
978 #ifdef IEDEBUG
979 print_rbd(sc->rbuffs[sc->rbhead]);
980 #endif
981 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
982 sc->sc_dev.dv_xname, sc->rbhead);
983 iereset(sc);
984 return;
985 }
986
987 i = (i & IE_RBD_LAST) != 0;
988
989 sc->rbuffs[sc->rbhead]->ie_rbd_length |= SWAP(IE_RBD_LAST);
990 sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
991 sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
992 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~SWAP(IE_RBD_LAST);
993 sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
994 } while (!i);
995 }
996
997
998 /*
999 * Start transmission on an interface.
1000 */
1001 void
1002 iestart(ifp)
1003 struct ifnet *ifp;
1004 {
1005 struct ie_softc *sc = ifp->if_softc;
1006 struct mbuf *m0, *m;
1007 u_char *buffer;
1008 u_short len;
1009
1010 if ((ifp->if_flags & IFF_RUNNING) == 0)
1011 return;
1012
1013 if (sc->xmit_free == 0) {
1014 ifp->if_flags |= IFF_OACTIVE;
1015 if (!sc->xmit_busy)
1016 iexmit(sc);
1017 return;
1018 }
1019
1020 do {
1021 IF_DEQUEUE(&sc->sc_ethercom.ec_if.if_snd, m);
1022 if (!m)
1023 break;
1024
1025 len = 0;
1026 buffer = sc->xmit_cbuffs[sc->xchead];
1027
1028 for (m0 = m; m && (len +m->m_len) < IE_TBUF_SIZE;
1029 m = m->m_next) {
1030 bcopy(mtod(m, caddr_t), buffer, m->m_len);
1031 buffer += m->m_len;
1032 len += m->m_len;
1033 }
1034 if (m)
1035 printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);
1036
1037 m_freem(m0);
1038 len = max(len, ETHER_MIN_LEN);
1039 sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);
1040
1041 sc->xmit_free--;
1042 sc->xchead = (sc->xchead + 1) % NTXBUF;
1043 } while (sc->xmit_free > 0);
1044
1045 /* If we stuffed any packets into the card's memory, send now. */
1046 if ((sc->xmit_free < NTXBUF) && (!sc->xmit_busy))
1047 iexmit(sc);
1048
1049 return;
1050 }
1051
/*
 * set up IE's ram space
 *
 * Zero and initialize the SCP, ISCP and SCB control structures in the
 * board's memory, reset the chip, and wait for it to clear the ISCP
 * busy flag.  Returns 1 on success, 0 if the chip never responded.
 * Runs at splnet().
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splnet();

	/* Clear all three control structures before filling them in. */
	scp = sc->scp;
	(sc->memzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->memzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->memzero)((char *) scb, sizeof *scb);

	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	/* The chip clears ie_busy once it has processed the ISCP. */
	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	if (sc->hwreset)
		(sc->hwreset)(sc);

	/* Get the chip's attention so it reads the new configuration. */
	(sc->chan_attn) (sc);

	delay(100);		/* wait a while... */

	/* Busy flag still set: the chip did not come up. */
	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}
1101
1102 void
1103 iereset(sc)
1104 struct ie_softc *sc;
1105 {
1106 int s = splnet();
1107
1108 printf("%s: reset\n", sc->sc_dev.dv_xname);
1109
1110 /* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
1111 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
1112
1113 /*
1114 * Stop i82586 dead in its tracks.
1115 */
1116 if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
1117 printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);
1118
1119 if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
1120 printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);
1121
1122
1123 #if notdef
1124 if (sc->hwreset)
1125 (sc->hwreset)(sc);
1126 #endif
1127 #ifdef notdef
1128 if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
1129 panic("ie disappeared!\n");
1130 #endif
1131
1132 ieinit(sc);
1133
1134 splx(s);
1135 }
1136
1137 /*
1138 * Send a command to the controller and wait for it to either complete
1139 * or be accepted, depending on the command. If the command pointer
1140 * is null, then pretend that the command is not an action command.
1141 * If the command pointer is not null, and the command is an action
1142 * command, wait for
1143 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
1144 * to become true.
1145 */
1146 static int
1147 command_and_wait(sc, cmd, pcmd, mask)
1148 struct ie_softc *sc;
1149 int cmd; /* native byte-order */
1150 volatile void *pcmd;
1151 int mask; /* native byte-order */
1152 {
1153 volatile struct ie_cmd_common *cc = pcmd;
1154 volatile struct ie_sys_ctl_block *scb = sc->scb;
1155 int i;
1156
1157 scb->ie_command = (u_short)SWAP(cmd);
1158
1159 if (IE_ACTION_COMMAND(cmd) && pcmd) {
1160 (sc->chan_attn)(sc);
1161
1162 /*
1163 * According to the packet driver, the minimum timeout should
1164 * be .369 seconds, which we round up to .4.
1165 */
1166
1167 /*
1168 * Now spin-lock waiting for status. This is not a very nice
1169 * thing to do, but I haven't figured out how, or indeed if, we
1170 * can put the process waiting for action to sleep. (We may
1171 * be getting called through some other timeout running in the
1172 * kernel.)
1173 */
1174 for (i = 0; i < 369; i++) {
1175 delay(1000);
1176 if ((SWAP(cc->ie_cmd_status) & mask))
1177 return (0);
1178 }
1179 return (1);
1180
1181 } else {
1182 /*
1183 * Otherwise, just wait for the command to be accepted.
1184 */
1185 (sc->chan_attn)(sc);
1186
1187 /* XXX spin lock; wait at most 0.1 seconds */
1188 for (i = 0; i < 10000; i++) {
1189 if (scb->ie_command)
1190 return (0);
1191 delay(10);
1192 }
1193
1194 return (1);
1195 }
1196 }
1197
1198 /*
1199 * Run the time-domain reflectometer.
1200 */
1201 static void
1202 run_tdr(sc, cmd)
1203 struct ie_softc *sc;
1204 struct ie_tdr_cmd *cmd;
1205 {
1206 int result;
1207
1208 cmd->com.ie_cmd_status = SWAP(0);
1209 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_TDR | IE_CMD_LAST);
1210 cmd->com.ie_cmd_link = SWAP(0xffff);
1211
1212 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1213 cmd->ie_tdr_time = SWAP(0);
1214
1215 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1216 !(SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK))
1217 result = 0x10000; /* XXX */
1218 else
1219 result = SWAP(cmd->ie_tdr_time);
1220
1221 ie_ack(sc, IE_ST_WHENCE);
1222
1223 if (result & IE_TDR_SUCCESS)
1224 return;
1225
1226 if (result & 0x10000)
1227 printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
1228 else if (result & IE_TDR_XCVR)
1229 printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
1230 else if (result & IE_TDR_OPEN)
1231 printf("%s: TDR detected an open %d clocks away\n",
1232 sc->sc_dev.dv_xname, result & IE_TDR_TIME);
1233 else if (result & IE_TDR_SHORT)
1234 printf("%s: TDR detected a short %d clocks away\n",
1235 sc->sc_dev.dv_xname, result & IE_TDR_TIME);
1236 else
1237 printf("%s: TDR returned unknown status %x\n",
1238 sc->sc_dev.dv_xname, result);
1239 }
1240
1241 #ifdef notdef
1242 /* ALIGN works on 8 byte boundaries.... but 4 byte boundaries are ok for sun */
1243 #define _ALLOC(p, n) (bzero(p, n), p += n, p - n)
1244 #define ALLOC(p, n) _ALLOC(p, ALIGN(n)) /* XXX convert to this? */
1245 #endif
1246
/*
 * setup_bufs: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers. the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses. so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field. The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 */
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have. XXX CURRENTLY HARDWIRED AT MAX
	 */
	(sc->memzero)(ptr, sc->buf_area_sz);
	ptr = (sc->align)(ptr);	/* set alignment and stick with it */

	/* Per-slot transmit cost: command + buffer descriptor + data. */
	n = (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_cmd)) +
	    (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
	n *= NTXBUF;		/* n = total size of xmit area */

	n = sc->buf_area_sz - n;/* n = free space for recv stuff */

	/* Per-frame receive cost: one RFD plus B_PER_F (RBD + data). */
	r = (int)(sc->align)((caddr_t) sizeof(struct ie_recv_frame_desc)) +
	    (((int)(sc->align)((caddr_t) sizeof(struct ie_recv_buf_desc)) +
	    IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc\n");
	if (sc->nframes > MXFRAMES)
		sc->nframes = MXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 * (modulo arithmetic makes the list circular)
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= SWAP(IE_FD_LAST);

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= SWAP(IE_RBD_LAST);

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = (sc->align)(ptr + IE_TBUF_SIZE);
	}

	/* Pointers to last packet sent and next available transmit buffer. */
	sc->xchead = sc->xctail = 0;

	/* Clear transmit-busy flag and set number of free transmit buffers. */
	sc->xmit_busy = 0;
	sc->xmit_free = NTXBUF;

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
		ptr = (sc->align)(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used. link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}
1383
1384 /*
1385 * Run the multicast setup command.
1386 * Called at splnet().
1387 */
1388 static int
1389 mc_setup(sc, ptr)
1390 struct ie_softc *sc;
1391 void *ptr;
1392 {
1393 volatile struct ie_mcast_cmd *cmd = ptr;
1394
1395 cmd->com.ie_cmd_status = SWAP(0);
1396 cmd->com.ie_cmd_cmd = SWAP(IE_CMD_MCAST | IE_CMD_LAST);
1397 cmd->com.ie_cmd_link = SWAP(0xffff);
1398
1399 (sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
1400 sc->mcast_count * sizeof *sc->mcast_addrs);
1401
1402 cmd->ie_mcast_bytes =
1403 SWAP(sc->mcast_count * ETHER_ADDR_LEN); /* grrr... */
1404
1405 sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
1406 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
1407 !(SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK)) {
1408 printf("%s: multicast address setup command failed\n",
1409 sc->sc_dev.dv_xname);
1410 return 0;
1411 }
1412 return 1;
1413 }
1414
/*
 * This routine takes the environment generated by check_ie_present() and adds
 * to it all the other structures we need to operate the adapter. This
 * includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, starting
 * the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
 *
 * NOTE(review): this returns 0 both on command failure and on success;
 * callers cannot distinguish the two from the return value.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	void *ptr;

	/* Commands are staged in the (not yet carved up) buffer area. */
	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		volatile struct ie_config_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = SWAP(IE_CMD_CONFIG | IE_CMD_LAST);
		cmd->com.ie_cmd_link = SWAP(0xffff);

		/* Fill in config parameters; honors promiscuous mode. */
		ie_setup_config(cmd, sc->promisc, 0);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK)) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		volatile struct ie_iasetup_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = SWAP(IE_CMD_IASETUP | IE_CMD_LAST);
		cmd->com.ie_cmd_link = SWAP(0xffff);

		/* Program our MAC address from the interface's sadl. */
		(sc->memcopy)(LLADDR(sc->sc_ethercom.ec_if.if_sadl),
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    !(SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK)) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	sc->sc_ethercom.ec_if.if_flags |= IFF_RUNNING;

	/* Point the receive unit at the first frame and start it. */
	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	/* Optional board-specific post-init hook. */
	if (sc->hwinit)
		(sc->hwinit)(sc);

	return 0;
}
1502
/*
 * Stop the interface by disabling the receive unit.
 * (The return value of command_and_wait is deliberately ignored.)
 */
static void
iestop(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}
1510
/*
 * Process an ioctl request for the interface.
 * Returns 0 on success or an errno value.  Runs at splnet() for the
 * duration of the request.
 */
int
ieioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch(cmd) {

	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;

		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host,
				    LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
			/* Set new address. */
			ieinit(sc);
			break;
		    }
#endif /* NS */
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/* Track promiscuous/allmulti state for the next ieinit(). */
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom):
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return error;
}
1614
1615 static void
1616 mc_reset(sc)
1617 struct ie_softc *sc;
1618 {
1619 struct ether_multi *enm;
1620 struct ether_multistep step;
1621
1622 /*
1623 * Step through the list of addresses.
1624 */
1625 sc->mcast_count = 0;
1626 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
1627 while (enm) {
1628 if (sc->mcast_count >= MAXMCAST ||
1629 bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
1630 sc->sc_ethercom.ec_if.if_flags |= IFF_ALLMULTI;
1631 ieioctl(&sc->sc_ethercom.ec_if, SIOCSIFFLAGS, (void *)0);
1632 goto setflag;
1633 }
1634
1635 bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
1636 sc->mcast_count++;
1637 ETHER_NEXT_MULTI(step, enm);
1638 }
1639 setflag:
1640 sc->want_mcsetup = 1;
1641 }
1642
#ifdef IEDEBUG
/*
 * Dump a receive buffer descriptor for debugging.
 *
 * Fix: the original had a misplaced parenthesis — SWAP(rbd->ie_rbd_actual,
 * was never closed, swallowing the remaining printf arguments into one
 * macro invocation.  Each field now gets its own SWAP().
 */
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %08x\n"
	    "length %04x, mbz %04x\n", (u_long)rbd,
	    SWAP(rbd->ie_rbd_actual),
	    SWAP(rbd->ie_rbd_next),
	    SWAP(rbd->ie_rbd_buffer),
	    SWAP(rbd->ie_rbd_length),
	    rbd->mbz);
}
#endif
1658