/*	$NetBSD: i82586.c,v 1.5 1997/07/29 20:24:47 pk Exp $	*/

/*-
 * Copyright (c) 1997 Paul Kranenburg.
 * Copyright (c) 1993, 1994, 1995 Charles Hannum.
 * Copyright (c) 1992, 1993, University of Vermont and State
 *  Agricultural College.
 * Copyright (c) 1992, 1993, Garrett A. Wollman.
 *
 * Portions:
 * Copyright (c) 1994, 1995, Rafal K. Boni
 * Copyright (c) 1990, 1991, William F. Jolitz
 * Copyright (c) 1990, The Regents of the University of California
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles Hannum, by the
 *	University of Vermont and State Agricultural College and Garrett A.
 *	Wollman, by William F. Jolitz, and by the University of California,
 *	Berkeley, Lawrence Berkeley Laboratory, and its contributors.
 * 4. Neither the names of the Universities nor the names of the authors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Intel 82586 Ethernet chip
 * Register, bit, and structure definitions.
 *
 * Original StarLAN driver written by Garrett Wollman with reference to the
 * Clarkson Packet Driver code for this chip written by Russ Nelson and others.
 *
 * BPF support code taken from hpdev/if_le.c, supplied with tcpdump.
 *
 * 3C507 support is loosely based on code donated to NetBSD by Rafal Boni.
 *
 * Majorly cleaned up and 3C507 code merged by Charles Hannum.
 *
 * Converted to SUN ie driver by Charles D. Cranor,
 * October 1994, January 1995.
 * This sun version based on i386 version 1.30.
 */

/*
 * The i82586 is a very painful chip, found in sun3's, sun-4/100's,
 * sun-4/200's, and VME based suns.  The byte order is all wrong for a
 * SUN, making life difficult.  Programming this chip is mostly the same,
 * but certain details differ from system to system.  This driver is
 * written so that different "ie" interfaces can be controlled by the same
 * driver.
 */

/*
Mode of operation:

We run the 82586 in a standard Ethernet mode.  We keep NFRAMES
received frame descriptors around for the receiver to use, and
NRXBUF associated receive buffer descriptors, both in a circular
list.  Whenever a frame is received, we rotate both lists as
necessary.  (The 586 treats both lists as a simple queue.)  We also
keep a transmit command around so that packets can be sent off
quickly.

We configure the adapter in AL-LOC = 1 mode, which means that the
Ethernet/802.3 MAC header is placed at the beginning of the receive
buffer rather than being split off into various fields in the RFD.
This also means that we must include this header in the transmit
buffer as well.

By convention, all transmit commands, and only transmit commands,
shall have the I (IE_CMD_INTR) bit set in the command.  This way,
when an interrupt arrives at ieintr(), it is immediately possible
to tell what precisely caused it.  ANY OTHER command-sending
routines should run at splnet(), and should post an acknowledgement
to every interrupt they generate.

*/
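/*
 * Illustrative sketch (not part of the driver logic): the "rotation"
 * described above is just the advancing of modular head/tail indices
 * over the descriptor arrays, as done in ie_readframe() and
 * ie_drop_packet_buffer() below, e.g.
 *
 *	sc->rftail = (sc->rftail + 1) % sc->nframes;
 *	sc->rfhead = (sc->rfhead + 1) % sc->nframes;
 *
 * with the IE_FD_LAST/IE_RBD_LAST (end-of-list) bit moved from the old
 * tail to the new one so the chip never runs off the end of the ring.
 */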

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include <machine/bus.h>

#include <dev/ic/i82586reg.h>
#include <dev/ic/i82586var.h>

void iewatchdog __P((struct ifnet *));
int ieinit __P((struct ie_softc *));
int ieioctl __P((struct ifnet *, u_long, caddr_t));
void iestart __P((struct ifnet *));
void iereset __P((struct ie_softc *));
static void ie_readframe __P((struct ie_softc *, int));
static void ie_drop_packet_buffer __P((struct ie_softc *));
int ie_setupram __P((struct ie_softc *));
static int command_and_wait __P((struct ie_softc *, int,
    void volatile *, int));
/*static*/ void ierint __P((struct ie_softc *));
/*static*/ void ietint __P((struct ie_softc *));
static struct mbuf *ieget __P((struct ie_softc *,
    struct ether_header *, int *));
static void setup_bufs __P((struct ie_softc *));
static int mc_setup __P((struct ie_softc *, void *));
static void mc_reset __P((struct ie_softc *));
static __inline int ether_equal __P((u_char *, u_char *));
static __inline void ie_ack __P((struct ie_softc *, u_int));
static __inline void ie_setup_config __P((volatile struct ie_config_cmd *,
    int, int));
static __inline int check_eh __P((struct ie_softc *, struct ether_header *,
    int *));
static __inline int ie_buflen __P((struct ie_softc *, int));
static __inline int ie_packet_len __P((struct ie_softc *));
static __inline void iexmit __P((struct ie_softc *));

static void run_tdr __P((struct ie_softc *, struct ie_tdr_cmd *));
static void iestop __P((struct ie_softc *));

#ifdef IEDEBUG
void print_rbd __P((volatile struct ie_recv_buf_desc *));

int in_ierint = 0;
int in_ietint = 0;
#endif

struct cfdriver ie_cd = {
	NULL, "ie", DV_IFNET
};

/*
 * Address generation macros:
 *  MK_24 = KVA -> 24 bit address in native byte order
 *  MK_16 = KVA -> 16 bit address in INTEL byte order
 *  ST_24 = store a 24 bit address in native byte order to INTEL byte order
 */
#define MK_24(base, ptr) ((caddr_t)((u_long)ptr - (u_long)base))

#if BYTE_ORDER == BIG_ENDIAN
#define XSWAP(y)	( ((y) >> 8) | ((y) << 8) )
#define SWAP(x)		({u_short _z=(x); (u_short)XSWAP(_z);})

#define MK_16(base, ptr) SWAP((u_short)( ((u_long)(ptr)) - ((u_long)(base)) ))
#define ST_24(to, from) { \
	u_long fval = (u_long)(from); \
	u_char *t = (u_char *)&(to), *f = (u_char *)&fval; \
	t[0] = f[3]; t[1] = f[2]; t[2] = f[1]; /*t[3] = f[0];*/ \
}
#else
#define SWAP(x)		x
#define MK_16(base, ptr) ((u_short)(u_long)MK_24(base, ptr))
#define ST_24(to, from) {to = (from);}
#endif
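/*
 * Usage sketch for the macros above (illustrative only; these exact
 * statements appear in iexmit() and setup_bufs() below):
 *
 *	sc->scb->ie_command_list =
 *	    MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);
 *	ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
 *
 * i.e. 16-bit offsets are always relative to the shared memory base
 * (sc->sc_maddr), while 24-bit buffer addresses are relative to the
 * chip's view of the bus (sc->sc_iobase).
 */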

/*
 * Here are a few useful functions.  We could have done these as macros, but
 * since we have the inline facility, it makes sense to use that instead.
 */
static __inline void
ie_setup_config(cmd, promiscuous, manchester)
	volatile struct ie_config_cmd *cmd;
	int promiscuous, manchester;
{

	cmd->ie_config_count = 0x0c;
	cmd->ie_fifo = 8;
	cmd->ie_save_bad = 0x40;
	cmd->ie_addr_len = 0x2e;
	cmd->ie_priority = 0;
	cmd->ie_ifs = 0x60;
	cmd->ie_slot_low = 0;
	cmd->ie_slot_high = 0xf2;
	cmd->ie_promisc = !!promiscuous | manchester << 2;
	cmd->ie_crs_cdt = 0;
	cmd->ie_min_len = 64;
	cmd->ie_junk = 0xff;
}

static __inline void
ie_ack(sc, mask)
	struct ie_softc *sc;
	u_int mask;	/* in native byte-order */
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;

	command_and_wait(sc, SWAP(scb->ie_status) & mask, 0, 0);
}

/*
 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
 */
void
ie_attach(sc, name, etheraddr)
	struct ie_softc *sc;
	char *name;
	u_int8_t *etheraddr;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (ie_setupram(sc) == 0) {	/* XXX - ISA version? */
		printf(": RAM CONFIG FAILED!\n");
		/* XXX should reclaim resources? */
		return;
	}

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = iestart;
	ifp->if_ioctl = ieioctl;
	ifp->if_watchdog = iewatchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, etheraddr);

	printf(" address %s, type %s\n", ether_sprintf(etheraddr), name);

#if NBPFILTER > 0
	bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
}

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to generate
 * an interrupt after a transmit has been started on it.
 */
void
iewatchdog(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	iereset(sc);
}

/*
 * What to do upon receipt of an interrupt.
 */
int
ieintr(v)
	void *v;
{
	struct ie_softc *sc = v;
	register u_short status;

	bus_space_barrier(sc->bt, sc->bh, 0, 0, BUS_SPACE_BARRIER_READ);
	status = SWAP(sc->scb->ie_status);

	/*
	 * Implementation dependent interrupt handling.
	 */
	if (sc->intrhook)
		(*sc->intrhook)(sc);

loop:
	/* Ack interrupts FIRST in case we receive more during the ISR. */
	ie_ack(sc, IE_ST_WHENCE & status);

	if (status & (IE_ST_FR | IE_ST_RNR)) {
#ifdef IEDEBUG
		in_ierint++;
		if (sc->sc_debug & IED_RINT)
			printf("%s: rint\n", sc->sc_dev.dv_xname);
#endif
		ierint(sc);
#ifdef IEDEBUG
		in_ierint--;
#endif
	}

	if (status & IE_ST_CX) {
#ifdef IEDEBUG
		in_ietint++;
		if (sc->sc_debug & IED_TINT)
			printf("%s: tint\n", sc->sc_dev.dv_xname);
#endif
		ietint(sc);
#ifdef IEDEBUG
		in_ietint--;
#endif
	}

	if (status & IE_ST_RNR) {
		printf("%s: receiver not ready\n", sc->sc_dev.dv_xname);
		sc->sc_ethercom.ec_if.if_ierrors++;
		iereset(sc);
		return (1);
	}

#ifdef IEDEBUG
	if ((status & IE_ST_CNA) && (sc->sc_debug & IED_CNA))
		printf("%s: cna\n", sc->sc_dev.dv_xname);
#endif

	bus_space_barrier(sc->bt, sc->bh, 0, 0, BUS_SPACE_BARRIER_READ);
	status = SWAP(sc->scb->ie_status);
	if (status & IE_ST_WHENCE)
		goto loop;

	return (1);
}

/*
 * Process a received-frame interrupt.
 */
void
ierint(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i, status;
	static int timesthru = 1024;

	i = sc->rfhead;
	for (;;) {
		status = SWAP(sc->rframes[i]->ie_fd_status);

		if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
			if (--timesthru == 0) {
				sc->sc_ethercom.ec_if.if_ierrors +=
				    SWAP(scb->ie_err_crc) +
				    SWAP(scb->ie_err_align) +
				    SWAP(scb->ie_err_resource) +
				    SWAP(scb->ie_err_overrun);
				scb->ie_err_crc = scb->ie_err_align =
				    scb->ie_err_resource = scb->ie_err_overrun =
				    SWAP(0);
				timesthru = 1024;
			}
			ie_readframe(sc, i);
		} else {
			if ((status & IE_FD_RNR) != 0 &&
			    (SWAP(scb->ie_status) & IE_RU_READY) == 0) {
				sc->rframes[0]->ie_fd_buf_desc =
				    MK_16(sc->sc_maddr, sc->rbuffs[0]);
				scb->ie_recv_list =
				    MK_16(sc->sc_maddr, sc->rframes[0]);
				command_and_wait(sc, IE_RU_START, 0, 0);
			}
			break;
		}
		i = (i + 1) % sc->nframes;
	}
}

/*
 * Process a command-complete interrupt.  These are only generated by the
 * transmission of frames.  This routine is deceptively simple, since most of
 * the real work is done by iestart().
 */
void
ietint(sc)
	struct ie_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int status;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	status = SWAP(sc->xmit_cmds[sc->xctail]->ie_xmit_status);

	if ((status & IE_STAT_COMPL) == 0 || (status & IE_STAT_BUSY))
		printf("ietint: command still busy!\n");

	if (status & IE_STAT_OK) {
		ifp->if_opackets++;
		ifp->if_collisions += (status & IE_XS_MAXCOLL);
	} else {
		ifp->if_oerrors++;
		/*
		 * Check SQE and DEFERRED?
		 * What if more than one bit is set?
		 */
		if (status & IE_STAT_ABORT)
			printf("%s: send aborted\n", sc->sc_dev.dv_xname);
		else if (status & IE_XS_NOCARRIER)
			printf("%s: no carrier\n", sc->sc_dev.dv_xname);
		else if (status & IE_XS_LOSTCTS)
			printf("%s: lost CTS\n", sc->sc_dev.dv_xname);
		else if (status & IE_XS_UNDERRUN)
			printf("%s: DMA underrun\n", sc->sc_dev.dv_xname);
		else if (status & IE_XS_EXCMAX) {
			printf("%s: too many collisions\n",
			    sc->sc_dev.dv_xname);
			sc->sc_ethercom.ec_if.if_collisions += 16;
		}
	}

	/*
	 * If multicast addresses were added or deleted while transmitting,
	 * mc_reset() set the want_mcsetup flag indicating that we should do
	 * it.
	 */
	if (sc->want_mcsetup) {
		mc_setup(sc, (caddr_t)sc->xmit_cbuffs[sc->xctail]);
		sc->want_mcsetup = 0;
	}

	/* Done with the buffer. */
	sc->xmit_busy--;
	sc->xctail = (sc->xctail + 1) % NTXBUF;

	/* Start the next packet, if any, transmitting. */
	if (sc->xmit_busy > 0)
		iexmit(sc);

	iestart(ifp);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and unrolled for
 * speed.
 */
static __inline int
ether_equal(one, two)
	u_char *one, *two;
{

	if (one[5] != two[5] || one[4] != two[4] || one[3] != two[3] ||
	    one[2] != two[2] || one[1] != two[1] || one[0] != two[0])
		return 0;
	return 1;
}

/*
 * Check for a valid address.  to_bpf is filled in with one of the following:
 *	0 -> BPF doesn't get this packet
 *	1 -> BPF does get this packet
 *	2 -> BPF does get this packet, but we don't
 * Return value is true if the packet is for us, and false otherwise.
 *
 * This routine is a mess, but it's also critical that it be as fast
 * as possible.  It could be made cleaner if we can assume that the
 * only client which will fiddle with IFF_PROMISC is BPF.  This is
 * probably a good assumption, but we do not make it here.  (Yet.)
 */
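/*
 * Example of how the to_bpf result is consumed (illustrative; see
 * ie_readframe() below): a value of 2 means the frame was copied in
 * only for BPF's benefit, so after bpf_mtap() the caller does
 *
 *	if (bpf_gets_it == 2) {
 *		m_freem(m);
 *		return;
 *	}
 *
 * and never hands the packet to ether_input().
 */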
static __inline int
check_eh(sc, eh, to_bpf)
	struct ie_softc *sc;
	struct ether_header *eh;
	int *to_bpf;
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->sc_ethercom.ec_if;

	switch (sc->promisc) {
	case IFF_ALLMULTI:
		/*
		 * Receiving all multicasts, but no unicasts except those
		 * destined for us.
		 */
#if NBPFILTER > 0
		/* BPF gets this packet if anybody cares */
		*to_bpf = (ifp->if_bpf != 0);
#endif
		if (eh->ether_dhost[0] & 1)
			return 1;
		if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
			return 1;
		return 0;

	case IFF_PROMISC:
		/*
		 * Receiving all packets.  These need to be passed on to BPF.
		 */
#if NBPFILTER > 0
		*to_bpf = (ifp->if_bpf != 0);
#endif
		/* If for us, accept and hand up to BPF */
		if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
			return 1;

#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;	/* we don't need to see it */
#endif

		/*
		 * Not a multicast, so BPF wants to see it but we don't.
		 */
		if ((eh->ether_dhost[0] & 1) == 0)
			return 1;

		/*
		 * If it's one of our multicast groups, accept it and pass it
		 * up.
		 */
		for (i = 0; i < sc->mcast_count; i++) {
			if (ether_equal(eh->ether_dhost,
			    (u_char *)&sc->mcast_addrs[i])) {
#if NBPFILTER > 0
				if (*to_bpf)
					*to_bpf = 1;
#endif
				return 1;
			}
		}
		return 1;

	case IFF_ALLMULTI | IFF_PROMISC:
		/*
		 * Acting as a multicast router, and BPF running at the same
		 * time.  Whew!  (Hope this is a fast machine...)
		 */
#if NBPFILTER > 0
		*to_bpf = (ifp->if_bpf != 0);
#endif
		/* We want to see multicasts. */
		if (eh->ether_dhost[0] & 1)
			return 1;

		/* We want to see our own packets */
		if (ether_equal(eh->ether_dhost, LLADDR(ifp->if_sadl)))
			return 1;

		/* Anything else goes to BPF but nothing else. */
#if NBPFILTER > 0
		if (*to_bpf)
			*to_bpf = 2;
#endif
		return 1;

	default:
		/*
		 * Only accept unicast packets destined for us, or multicasts
		 * for groups that we belong to.  For now, we assume that the
		 * '586 will only return packets that we asked it for.  This
		 * isn't strictly true (it uses hashing for the multicast
		 * filter), but it will do in this case, and we want to get
		 * out of here as quickly as possible.
		 */
#if NBPFILTER > 0
		*to_bpf = (ifp->if_bpf != 0);
#endif
		return 1;
	}
	return 0;
}

/*
 * We want to isolate the bits that have meaning...  This assumes that
 * IE_RBUF_SIZE is an even power of two.  If somehow the act_len exceeds
 * the size of the buffer, then we are screwed anyway.
 */
static __inline int
ie_buflen(sc, head)
	struct ie_softc *sc;
	int head;
{

	return (SWAP(sc->rbuffs[head]->ie_rbd_actual)
	    & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
}

static __inline int
ie_packet_len(sc)
	struct ie_softc *sc;
{
	int i;
	int head = sc->rbhead;
	int acc = 0;
	int oldhead = head;

	do {
		bus_space_barrier(sc->bt, sc->bh, 0, 0, BUS_SPACE_BARRIER_READ);
		i = SWAP(sc->rbuffs[head]->ie_rbd_actual);
		if ((i & IE_RBD_USED) == 0) {
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[head]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return -1;
		}

		i = (i & IE_RBD_LAST) != 0;

		acc += ie_buflen(sc, head);
		head = (head + 1) % sc->nrxbuf;
		if (oldhead == head) {
			printf("ie: packet len: looping: acc = %d (head=%d)\n",
			    acc, head);
			iereset(sc);
			return -1;
		}
	} while (!i);

	return acc;
}

/*
 * Setup all necessary artifacts for an XMIT command, and then pass the XMIT
 * command to the chip to be executed.  On the way, if we have a BPF listener
 * also give him a copy.
 */
static __inline void
iexmit(sc)
	struct ie_softc *sc;
{

#ifdef IEDEBUG
	if (sc->sc_debug & IED_XMIT)
		printf("%s: xmit buffer %d\n", sc->sc_dev.dv_xname,
		    sc->xctail);
#endif

	sc->xmit_buffs[sc->xctail]->ie_xmit_flags |= SWAP(IE_XMIT_LAST);
	sc->xmit_buffs[sc->xctail]->ie_xmit_next = SWAP(0xffff);
	ST_24(sc->xmit_buffs[sc->xctail]->ie_xmit_buf,
	    MK_24(sc->sc_iobase, sc->xmit_cbuffs[sc->xctail]));

	sc->xmit_cmds[sc->xctail]->com.ie_cmd_link = SWAP(0xffff);
	sc->xmit_cmds[sc->xctail]->com.ie_cmd_cmd =
	    SWAP(IE_CMD_XMIT | IE_CMD_INTR | IE_CMD_LAST);

	sc->xmit_cmds[sc->xctail]->ie_xmit_status = SWAP(0);
	sc->xmit_cmds[sc->xctail]->ie_xmit_desc =
	    MK_16(sc->sc_maddr, sc->xmit_buffs[sc->xctail]);

	sc->scb->ie_command_list =
	    MK_16(sc->sc_maddr, sc->xmit_cmds[sc->xctail]);

	command_and_wait(sc, IE_CU_START, 0, 0);

	sc->sc_ethercom.ec_if.if_timer = 5;
}

/*
 * Read data off the interface, and turn it into an mbuf chain.
 *
 * This code is DRAMATICALLY different from the previous version; this
 * version tries to allocate the entire mbuf chain up front, given the
 * length of the data available.  This enables us to allocate mbuf
 * clusters in many situations where before we would have had a long
 * chain of partially-full mbufs.  This should help to speed up the
 * operation considerably.  (Provided that it works, of course.)
 */
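/*
 * Worked example of the allocation strategy (illustrative only): for a
 * full-sized frame (say 1514 bytes), 1500 bytes remain after the Ethernet
 * header is peeled off; that is >= MINCLSIZE, so the loop below attaches a
 * single cluster (MCLBYTES) to the header mbuf instead of building a long
 * chain of small MLEN-sized mbufs.
 */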
struct mbuf *
ieget(sc, ehp, to_bpf)
	struct ie_softc *sc;
	struct ether_header *ehp;
	int *to_bpf;
{
	struct mbuf *top, **mp, *m;
	int len, totlen, resid;
	int thisrboff, thismboff;
	int head;

	totlen = ie_packet_len(sc);
	if (totlen <= 0)
		return 0;

	head = sc->rbhead;

	/*
	 * Snarf the Ethernet header.
	 */
	(sc->memcopy)((caddr_t)sc->cbuffs[head], (caddr_t)ehp, sizeof *ehp);

	/*
	 * As quickly as possible, check if this packet is for us.
	 * If not, don't waste a single cycle copying the rest of the
	 * packet in.
	 * This is only a consideration when FILTER is defined; i.e., when
	 * we are either running BPF or doing multicasting.
	 */
	if (!check_eh(sc, ehp, to_bpf)) {
		/* just this case, it's not an error */
		sc->sc_ethercom.ec_if.if_ierrors--;
		return 0;
	}

	resid = totlen -= (thisrboff = sizeof *ehp);

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return 0;
	m->m_pkthdr.rcvif = &sc->sc_ethercom.ec_if;
	m->m_pkthdr.len = totlen;
	len = MHLEN;
	top = 0;
	mp = &top;

	/*
	 * This loop goes through and allocates mbufs for all the data we will
	 * be copying in.  It does not actually do the copying yet.
	 */
	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return 0;
			}
			len = MLEN;
		}
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				/* Don't leak the not-yet-linked mbuf. */
				m_free(m);
				m_freem(top);
				return 0;
			}
			len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	m = top;
	thismboff = 0;

	/*
	 * Now we take the mbuf chain (hopefully only one mbuf most of the
	 * time) and stuff the data into it.  There are no possible failures at
	 * or after this point.
	 */
	while (resid > 0) {
		int thisrblen = ie_buflen(sc, head) - thisrboff,
		    thismblen = m->m_len - thismboff;
		len = min(thisrblen, thismblen);

		(sc->memcopy)((caddr_t)(sc->cbuffs[head] + thisrboff),
		    mtod(m, caddr_t) + thismboff, (u_int)len);
		resid -= len;

		if (len == thismblen) {
			m = m->m_next;
			thismboff = 0;
		} else
			thismboff += len;

		if (len == thisrblen) {
			head = (head + 1) % sc->nrxbuf;
			thisrboff = 0;
		} else
			thisrboff += len;
	}

	/*
	 * Unless something changed strangely while we were doing the copy, we
	 * have now copied everything in from the shared memory.
	 * This means that we are done.
	 */
	return top;
}

/*
 * Read frame NUM from unit UNIT (pre-cached as IE).
 *
 * This routine reads the RFD at NUM, and copies in the buffers from the list
 * of RBD, then rotates the RBD and RFD lists so that the receiver doesn't
 * start complaining.  Trailers are DROPPED---there's no point in wasting time
 * on confusing code to deal with them.  Hopefully, this machine will never ARP
 * for trailers anyway.
 */
static void
ie_readframe(sc, num)
	struct ie_softc *sc;
	int num;		/* frame number to read */
{
	int status;
	struct mbuf *m = 0;
	struct ether_header eh;
#if NBPFILTER > 0
	int bpf_gets_it = 0;
#endif

	status = SWAP(sc->rframes[num]->ie_fd_status);

	/* Immediately advance the RFD list, since we have copied ours now. */
	sc->rframes[num]->ie_fd_status = SWAP(0);
	sc->rframes[num]->ie_fd_last |= SWAP(IE_FD_LAST);
	sc->rframes[sc->rftail]->ie_fd_last &= ~SWAP(IE_FD_LAST);
	sc->rftail = (sc->rftail + 1) % sc->nframes;
	sc->rfhead = (sc->rfhead + 1) % sc->nframes;

	if (status & IE_FD_OK) {
#if NBPFILTER > 0
		m = ieget(sc, &eh, &bpf_gets_it);
#else
		m = ieget(sc, &eh, 0);
#endif
		ie_drop_packet_buffer(sc);
	}
	if (m == 0) {
		sc->sc_ethercom.ec_if.if_ierrors++;
		return;
	}

#ifdef IEDEBUG
	if (sc->sc_debug & IED_READFRAME)
		printf("%s: frame from ether %s type 0x%x\n",
		    sc->sc_dev.dv_xname,
		    ether_sprintf(eh.ether_shost), (u_int)eh.ether_type);
#endif

#if NBPFILTER > 0
	/*
	 * Check for a BPF filter; if so, hand it up.
	 * Note that we have to stick an extra mbuf up front, because bpf_mtap
	 * expects to have the ether header at the front.
	 * It doesn't matter that this results in an ill-formatted mbuf chain,
	 * since BPF just looks at the data.  (It doesn't try to free the mbuf,
	 * tho' it will make a copy for tcpdump.)
	 */
	if (bpf_gets_it) {
		struct mbuf m0;
		m0.m_len = sizeof eh;
		m0.m_data = (caddr_t)&eh;
		m0.m_next = m;

		/* Pass it up. */
		bpf_mtap(sc->sc_ethercom.ec_if.if_bpf, &m0);

		/*
		 * A signal passed up from the filtering code indicating that
		 * the packet is intended for BPF but not for the protocol
		 * machinery.  We can save a few cycles by not handing it off
		 * to them.
		 */
		if (bpf_gets_it == 2) {
			m_freem(m);
			return;
		}
	}
#endif /* NBPFILTER > 0 */

	/*
	 * In here there used to be code to check destination addresses upon
	 * receipt of a packet.  We have deleted that code, and replaced it
	 * with code to check the address much earlier in the cycle, before
	 * copying the data in; this saves us valuable cycles when operating
	 * as a multicast router or when using BPF.
	 */

	/*
	 * Finally pass this packet up to higher layers.
	 */
	ether_input(&sc->sc_ethercom.ec_if, &eh, m);
	sc->sc_ethercom.ec_if.if_ipackets++;
}

static void
ie_drop_packet_buffer(sc)
	struct ie_softc *sc;
{
	int i;

	do {
		bus_space_barrier(sc->bt, sc->bh, 0, 0, BUS_SPACE_BARRIER_READ);
		i = SWAP(sc->rbuffs[sc->rbhead]->ie_rbd_actual);
		if ((i & IE_RBD_USED) == 0) {
			/*
			 * This means we are somehow out of sync.  So, we
			 * reset the adapter.
			 */
#ifdef IEDEBUG
			print_rbd(sc->rbuffs[sc->rbhead]);
#endif
			log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
			    sc->sc_dev.dv_xname, sc->rbhead);
			iereset(sc);
			return;
		}

		i = (i & IE_RBD_LAST) != 0;

		sc->rbuffs[sc->rbhead]->ie_rbd_length |= SWAP(IE_RBD_LAST);
		sc->rbuffs[sc->rbhead]->ie_rbd_actual = SWAP(0);
		sc->rbhead = (sc->rbhead + 1) % sc->nrxbuf;
		sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~SWAP(IE_RBD_LAST);
		sc->rbtail = (sc->rbtail + 1) % sc->nrxbuf;
	} while (!i);
}

/*
 * Start transmission on an interface.
 */
void
iestart(ifp)
	struct ifnet *ifp;
{
	struct ie_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	u_char *buffer;
	u_short len;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		if (sc->xmit_busy == NTXBUF) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == 0)
			break;

		/* We need to use m->m_pkthdr.len, so require the header */
		if ((m0->m_flags & M_PKTHDR) == 0)
			panic("iestart: no header mbuf");

#if NBPFILTER > 0
		/* Tap off here if there is a BPF listener. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

#ifdef IEDEBUG
		if (sc->sc_debug & IED_ENQ)
			printf("%s: fill buffer %d\n", sc->sc_dev.dv_xname,
			    sc->xchead);
#endif

		if (m0->m_pkthdr.len > IE_TBUF_SIZE)
			printf("%s: tbuf overflow\n", sc->sc_dev.dv_xname);

		buffer = sc->xmit_cbuffs[sc->xchead];
		for (m = m0; m != 0; m = m->m_next) {
			(sc->memcopy)(mtod(m, caddr_t), buffer, m->m_len);
			buffer += m->m_len;
		}

		len = max(m0->m_pkthdr.len, ETHER_MIN_LEN);
		m_freem(m0);

		sc->xmit_buffs[sc->xchead]->ie_xmit_flags = SWAP(len);

		/* Start the first packet transmitting. */
		if (sc->xmit_busy == 0)
			iexmit(sc);

		sc->xchead = (sc->xchead + 1) % NTXBUF;
		sc->xmit_busy++;
	}
}

/*
 * set up IE's ram space
 */
int
ie_setupram(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_conf_ptr *scp;
	volatile struct ie_int_sys_conf_ptr *iscp;
	volatile struct ie_sys_ctl_block *scb;
	int s;

	s = splnet();

	scp = sc->scp;
	(sc->memzero)((char *) scp, sizeof *scp);

	iscp = sc->iscp;
	(sc->memzero)((char *) iscp, sizeof *iscp);

	scb = sc->scb;
	(sc->memzero)((char *) scb, sizeof *scb);

	scp->ie_bus_use = 0;	/* 16-bit */
	ST_24(scp->ie_iscp_ptr, MK_24(sc->sc_iobase, iscp));

	iscp->ie_busy = 1;	/* ie_busy == char */
	iscp->ie_scb_offset = MK_16(sc->sc_maddr, scb);
	ST_24(iscp->ie_base, MK_24(sc->sc_iobase, sc->sc_maddr));

	if (sc->hwreset)
		(sc->hwreset)(sc);

	(sc->chan_attn) (sc);

	delay(100);		/* wait a while... */

	if (iscp->ie_busy) {
		splx(s);
		return 0;
	}
	/*
	 * Acknowledge any interrupts we may have caused...
	 */
	ie_ack(sc, IE_ST_WHENCE);
	splx(s);

	return 1;
}

void
iereset(sc)
	struct ie_softc *sc;
{
	int s = splnet();

	printf("%s: reset\n", sc->sc_dev.dv_xname);

	/* Clear OACTIVE in case we're called from watchdog (frozen xmit). */
	sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;

	/*
	 * Stop i82586 dead in its tracks.
	 */
	if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
		printf("%s: abort commands timed out\n", sc->sc_dev.dv_xname);

	if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
		printf("%s: disable commands timed out\n", sc->sc_dev.dv_xname);

#if 1
	if (sc->hwreset)
		(sc->hwreset)(sc);
#endif
	ie_ack(sc, IE_ST_WHENCE);
#ifdef notdef
	if (!check_ie_present(sc, sc->sc_maddr, sc->sc_msize))
		panic("ie disappeared!\n");
#endif

	ieinit(sc);

	splx(s);
}

/*
 * Send a command to the controller and wait for it to either complete
 * or be accepted, depending on the command.  If the command pointer
 * is null, then pretend that the command is not an action command.
 * If the command pointer is not null, and the command is an action
 * command, wait for
 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
 * to become true.
 */
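/*
 * Typical uses found elsewhere in this file (illustrative):
 *
 *	command_and_wait(sc, IE_RU_START, 0, 0);
 *		- start the receive unit; only wait for the SCB command
 *		  word to be accepted.
 *
 *	command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL);
 *		- start the command unit and spin until the action
 *		  command pointed to by `cmd' reports completion.
 */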
static int
command_and_wait(sc, cmd, pcmd, mask)
	struct ie_softc *sc;
	int cmd;		/* native byte-order */
	volatile void *pcmd;
	int mask;		/* native byte-order */
{
	volatile struct ie_cmd_common *cc = pcmd;
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	int i;

	scb->ie_command = (u_short)SWAP(cmd);
	bus_space_barrier(sc->bt, sc->bh, 0, 0, BUS_SPACE_BARRIER_WRITE);
	(sc->chan_attn)(sc);

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		/*
		 * According to the packet driver, the minimum timeout should
		 * be .369 seconds, which we round up to .4.
		 */

		/*
		 * Now spin-lock waiting for status.  This is not a very nice
		 * thing to do, but I haven't figured out how, or indeed if, we
		 * can put the process waiting for action to sleep.  (We may
		 * be getting called through some other timeout running in the
		 * kernel.)
		 */
		for (i = 0; i < 369000; i++) {
			delay(1);
			if ((SWAP(cc->ie_cmd_status) & mask))
				return (0);
		}

	} else {
		/*
		 * Otherwise, just wait for the command to be accepted.
		 */

		/* XXX spin lock; wait at most 0.1 seconds */
		for (i = 0; i < 100000; i++) {
			/* The chip clears the SCB command word on acceptance. */
			if (scb->ie_command == 0)
				return (0);
			delay(1);
		}
	}

	/* Timeout */
	return (1);
}

/*
 * Run the time-domain reflectometer.
 */
static void
run_tdr(sc, cmd)
	struct ie_softc *sc;
	struct ie_tdr_cmd *cmd;
{
	int result;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = SWAP(IE_CMD_TDR | IE_CMD_LAST);
	cmd->com.ie_cmd_link = SWAP(0xffff);

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	cmd->ie_tdr_time = SWAP(0);

	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0)
		result = 0x10000;	/* XXX */
	else
		result = SWAP(cmd->ie_tdr_time);

	ie_ack(sc, IE_ST_WHENCE);

	if (result & IE_TDR_SUCCESS)
		return;

	if (result & 0x10000)
		printf("%s: TDR command failed\n", sc->sc_dev.dv_xname);
	else if (result & IE_TDR_XCVR)
		printf("%s: transceiver problem\n", sc->sc_dev.dv_xname);
	else if (result & IE_TDR_OPEN)
		printf("%s: TDR detected an open %d clocks away\n",
		    sc->sc_dev.dv_xname, result & IE_TDR_TIME);
	else if (result & IE_TDR_SHORT)
		printf("%s: TDR detected a short %d clocks away\n",
		    sc->sc_dev.dv_xname, result & IE_TDR_TIME);
	else
		printf("%s: TDR returned unknown status 0x%x\n",
		    sc->sc_dev.dv_xname, result);
}

#ifdef notdef
/* ALIGN works on 8 byte boundaries.... but 4 byte boundaries are ok for sun */
#define _ALLOC(p, n)	(bzero(p, n), p += n, p - n)
#define ALLOC(p, n)	_ALLOC(p, ALIGN(n))	/* XXX convert to this? */
#endif

/*
 * setup_bufs: set up the buffers
 *
 * we have a block of KVA at sc->buf_area which is of size sc->buf_area_sz.
 * this is to be used for the buffers.  the chip indexes its control data
 * structures with 16 bit offsets, and it indexes actual buffers with
 * 24 bit addresses.  so we should allocate control buffers first so that
 * we don't overflow the 16 bit offset field.  The number of transmit
 * buffers is fixed at compile time.
 *
 * note: this function was written to be easy to understand, rather than
 * highly efficient (it isn't in the critical path).
 */
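/*
 * Resulting layout of sc->buf_area (a sketch, following the numbered
 * steps below):
 *
 *	xmit commands			(NTXBUF)
 *	recv frame descriptors		(nframes)
 *	xmit buffer descriptors		(NTXBUF)
 *	recv buffer descriptors		(nrxbuf = nframes * B_PER_F)
 *	xmit data buffers		(NTXBUF * IE_TBUF_SIZE)
 *	recv data buffers		(nrxbuf * IE_RBUF_SIZE)
 */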
static void
setup_bufs(sc)
	struct ie_softc *sc;
{
	caddr_t ptr = sc->buf_area;	/* memory pool */
	int n, r;

	/*
	 * step 0: zero memory and figure out how many recv buffers and
	 * frames we can have.
	 */
	(sc->memzero)(ptr, sc->buf_area_sz);
	ptr = (sc->align)(ptr);		/* set alignment and stick with it */

	n = (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_cmd)) +
	    (int)(sc->align)((caddr_t) sizeof(struct ie_xmit_buf)) + IE_TBUF_SIZE;
	n *= NTXBUF;		/* n = total size of xmit area */

	n = sc->buf_area_sz - n;/* n = free space for recv stuff */

	r = (int)(sc->align)((caddr_t) sizeof(struct ie_recv_frame_desc)) +
	    (((int)(sc->align)((caddr_t) sizeof(struct ie_recv_buf_desc)) +
	    IE_RBUF_SIZE) * B_PER_F);

	/* r = size of one R frame */

	sc->nframes = n / r;
	if (sc->nframes <= 0)
		panic("ie: bogus buffer calc\n");
	if (sc->nframes > MAXFRAMES)
		sc->nframes = MAXFRAMES;

	sc->nrxbuf = sc->nframes * B_PER_F;

#ifdef IEDEBUG
	printf("IEDEBUG: %d frames %d bufs\n", sc->nframes, sc->nrxbuf);
#endif

	/*
	 * step 1a: lay out and zero frame data structures for transmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cmds[n] = (volatile struct ie_xmit_cmd *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_xmit_cmd));
	}

	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n] = (volatile struct ie_recv_frame_desc *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_recv_frame_desc));
	}

	/*
	 * step 1b: link together the recv frames and set EOL on last one
	 */
	for (n = 0; n < sc->nframes; n++) {
		sc->rframes[n]->ie_fd_next =
		    MK_16(sc->sc_maddr, sc->rframes[(n + 1) % sc->nframes]);
	}
	sc->rframes[sc->nframes - 1]->ie_fd_last |= SWAP(IE_FD_LAST);

	/*
	 * step 2a: lay out and zero frame buffer structures for xmit and recv
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_buffs[n] = (volatile struct ie_xmit_buf *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_xmit_buf));
	}

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n] = (volatile struct ie_recv_buf_desc *) ptr;
		ptr = (sc->align)(ptr + sizeof(struct ie_recv_buf_desc));
	}

	/*
	 * step 2b: link together recv bufs and set EOL on last one
	 */
	for (n = 0; n < sc->nrxbuf; n++) {
		sc->rbuffs[n]->ie_rbd_next =
		    MK_16(sc->sc_maddr, sc->rbuffs[(n + 1) % sc->nrxbuf]);
	}
	sc->rbuffs[sc->nrxbuf - 1]->ie_rbd_length |= SWAP(IE_RBD_LAST);

	/*
	 * step 3: allocate the actual data buffers for xmit and recv
	 * recv buffer gets linked into recv_buf_desc list here
	 */
	for (n = 0; n < NTXBUF; n++) {
		sc->xmit_cbuffs[n] = (u_char *) ptr;
		ptr = (sc->align)(ptr + IE_TBUF_SIZE);
	}

	/* Pointers to last packet sent and next available transmit buffer. */
	sc->xchead = sc->xctail = 0;

	/* Clear transmit-busy flag and set number of free transmit buffers. */
	sc->xmit_busy = 0;

	for (n = 0; n < sc->nrxbuf; n++) {
		sc->cbuffs[n] = (char *) ptr;	/* XXX why char vs uchar? */
		sc->rbuffs[n]->ie_rbd_length = SWAP(IE_RBUF_SIZE);
		ST_24(sc->rbuffs[n]->ie_rbd_buffer, MK_24(sc->sc_iobase, ptr));
		ptr = (sc->align)(ptr + IE_RBUF_SIZE);
	}

	/*
	 * step 4: set the head and tail pointers on receive to keep track of
	 * the order in which RFDs and RBDs are used.  link in recv frames
	 * and buffer into the scb.
	 */

	sc->rfhead = 0;
	sc->rftail = sc->nframes - 1;
	sc->rbhead = 0;
	sc->rbtail = sc->nrxbuf - 1;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	sc->rframes[0]->ie_fd_buf_desc = MK_16(sc->sc_maddr, sc->rbuffs[0]);

#ifdef IEDEBUG
	printf("IE_DEBUG: reserved %d bytes\n", ptr - sc->buf_area);
#endif
}

/*
 * Run the multicast setup command.
 * Called at splnet().
 */
static int
mc_setup(sc, ptr)
	struct ie_softc *sc;
	void *ptr;
{
	volatile struct ie_mcast_cmd *cmd = ptr;

	cmd->com.ie_cmd_status = SWAP(0);
	cmd->com.ie_cmd_cmd = SWAP(IE_CMD_MCAST | IE_CMD_LAST);
	cmd->com.ie_cmd_link = SWAP(0xffff);

	(sc->memcopy)((caddr_t)sc->mcast_addrs, (caddr_t)cmd->ie_mcast_addrs,
	    sc->mcast_count * sizeof *sc->mcast_addrs);

	cmd->ie_mcast_bytes =
	    SWAP(sc->mcast_count * ETHER_ADDR_LEN);	/* grrr... */

	sc->scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
	if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
	    (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
		printf("%s: multicast address setup command failed\n",
		    sc->sc_dev.dv_xname);
		return 0;
	}
	return 1;
}

/*
 * This routine takes the environment generated by check_ie_present() and adds
 * to it all the other structures we need to operate the adapter.  This
 * includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, starting
 * the receiver unit, and clearing interrupts.
 *
 * THIS ROUTINE MUST BE CALLED AT splnet() OR HIGHER.
 */
int
ieinit(sc)
	struct ie_softc *sc;
{
	volatile struct ie_sys_ctl_block *scb = sc->scb;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	void *ptr;

	ptr = sc->buf_area;

	/*
	 * Send the configure command first.
	 */
	{
		volatile struct ie_config_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = SWAP(IE_CMD_CONFIG | IE_CMD_LAST);
		cmd->com.ie_cmd_link = SWAP(0xffff);

		ie_setup_config(cmd, sc->promisc, 0);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
			printf("%s: configure command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now send the Individual Address Setup command.
	 */
	{
		volatile struct ie_iasetup_cmd *cmd = ptr;

		scb->ie_command_list = MK_16(sc->sc_maddr, cmd);
		cmd->com.ie_cmd_status = SWAP(0);
		cmd->com.ie_cmd_cmd = SWAP(IE_CMD_IASETUP | IE_CMD_LAST);
		cmd->com.ie_cmd_link = SWAP(0xffff);

		(sc->memcopy)(LLADDR(ifp->if_sadl),
		    (caddr_t)&cmd->ie_address, sizeof cmd->ie_address);

		if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) ||
		    (SWAP(cmd->com.ie_cmd_status) & IE_STAT_OK) == 0) {
			printf("%s: individual address setup command failed\n",
			    sc->sc_dev.dv_xname);
			return 0;
		}
	}

	/*
	 * Now run the time-domain reflectometer.
	 */
	run_tdr(sc, ptr);

	/*
	 * Acknowledge any interrupts we have generated thus far.
	 */
	ie_ack(sc, IE_ST_WHENCE);

	/*
	 * Set up the transmit and recv buffers.
	 */
	setup_bufs(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->scb->ie_recv_list = MK_16(sc->sc_maddr, sc->rframes[0]);
	command_and_wait(sc, IE_RU_START, 0, 0);

	ie_ack(sc, IE_ST_WHENCE);

	if (sc->hwinit)
		(sc->hwinit)(sc);

	return 0;
}

static void
iestop(sc)
	struct ie_softc *sc;
{

	command_and_wait(sc, IE_RU_DISABLE, 0, 0);
}

int
ieioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct ie_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ieinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		/* XXX - This code is probably wrong. */
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host,
				    LLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
			/* Set new address. */
			ieinit(sc);
			break;
		    }
#endif /* NS */
		default:
			ieinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			iestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			ieinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			iestop(sc);
			ieinit(sc);
		}
#ifdef IEDEBUG
		if (ifp->if_flags & IFF_DEBUG)
			sc->sc_debug = IED_ALL;
		else
			sc->sc_debug = 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom):
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			mc_reset(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return error;
}

static void
mc_reset(sc)
	struct ie_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Step through the list of addresses.
	 */
	sc->mcast_count = 0;
	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
	while (enm) {
		if (sc->mcast_count >= MAXMCAST ||
		    bcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			sc->sc_ethercom.ec_if.if_flags |= IFF_ALLMULTI;
			ieioctl(&sc->sc_ethercom.ec_if, SIOCSIFFLAGS, (void *)0);
			goto setflag;
		}

		bcopy(enm->enm_addrlo, &sc->mcast_addrs[sc->mcast_count], 6);
		sc->mcast_count++;
		ETHER_NEXT_MULTI(step, enm);
	}
setflag:
	sc->want_mcsetup = 1;
}

#ifdef IEDEBUG
void
print_rbd(rbd)
	volatile struct ie_recv_buf_desc *rbd;
{
	u_long bufval;

	bcopy((char *)&rbd->ie_rbd_buffer, &bufval, 4);	/*XXX*/

	printf("RBD at %08lx:\nactual %04x, next %04x, buffer %lx\n"
	    "length %04x, mbz %04x\n", (u_long)rbd,
	    SWAP(rbd->ie_rbd_actual),
	    SWAP(rbd->ie_rbd_next),
	    bufval,
	    SWAP(rbd->ie_rbd_length),
	    rbd->mbz);
}
#endif