hme.c revision 1.1.4.3 1 /* $NetBSD: hme.c,v 1.1.4.3 2001/01/05 17:35:37 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * HME Ethernet module driver.
41 */
42
43 #define HMEDEBUG
44
45 #include "opt_inet.h"
46 #include "opt_ns.h"
47 #include "bpfilter.h"
48 #include "rnd.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/kernel.h>
53 #include <sys/mbuf.h>
54 #include <sys/syslog.h>
55 #include <sys/socket.h>
56 #include <sys/device.h>
57 #include <sys/malloc.h>
58 #include <sys/ioctl.h>
59 #include <sys/errno.h>
60 #if NRND > 0
61 #include <sys/rnd.h>
62 #endif
63
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_ether.h>
67 #include <net/if_media.h>
68
69 #ifdef INET
70 #include <netinet/in.h>
71 #include <netinet/if_inarp.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/in_var.h>
74 #include <netinet/ip.h>
75 #endif
76
77 #ifdef NS
78 #include <netns/ns.h>
79 #include <netns/ns_if.h>
80 #endif
81
82 #if NBPFILTER > 0
83 #include <net/bpf.h>
84 #include <net/bpfdesc.h>
85 #endif
86
87 #include <dev/mii/mii.h>
88 #include <dev/mii/miivar.h>
89
90 #include <machine/bus.h>
91
92 #include <dev/ic/hmereg.h>
93 #include <dev/ic/hmevar.h>
94
95 void hme_start __P((struct ifnet *));
96 void hme_stop __P((struct hme_softc *));
97 int hme_ioctl __P((struct ifnet *, u_long, caddr_t));
98 void hme_tick __P((void *));
99 void hme_watchdog __P((struct ifnet *));
100 void hme_shutdown __P((void *));
101 void hme_init __P((struct hme_softc *));
102 void hme_meminit __P((struct hme_softc *));
103 void hme_mifinit __P((struct hme_softc *));
104 void hme_reset __P((struct hme_softc *));
105 void hme_setladrf __P((struct hme_softc *));
106
107 /* MII methods & callbacks */
108 static int hme_mii_readreg __P((struct device *, int, int));
109 static void hme_mii_writereg __P((struct device *, int, int, int));
110 static void hme_mii_statchg __P((struct device *));
111
112 int hme_mediachange __P((struct ifnet *));
113 void hme_mediastatus __P((struct ifnet *, struct ifmediareq *));
114
115 struct mbuf *hme_get __P((struct hme_softc *, int, int));
116 int hme_put __P((struct hme_softc *, int, struct mbuf *));
117 void hme_read __P((struct hme_softc *, int, int));
118 int hme_eint __P((struct hme_softc *, u_int));
119 int hme_rint __P((struct hme_softc *));
120 int hme_tint __P((struct hme_softc *));
121
122 static int ether_cmp __P((u_char *, u_char *));
123
124 /* Default buffer copy routines */
125 void hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
126 void hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
127 void hme_zerobuf_contig __P((struct hme_softc *, int, int));
128
129
130 void
131 hme_config(sc)
132 struct hme_softc *sc;
133 {
134 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
135 struct mii_data *mii = &sc->sc_mii;
136 struct mii_softc *child;
137 bus_dma_tag_t dmatag = sc->sc_dmatag;
138 bus_dma_segment_t seg;
139 bus_size_t size;
140 int rseg, error;
141
142 /*
143 * HME common initialization.
144 *
145 * hme_softc fields that must be initialized by the front-end:
146 *
147 * the bus tag:
148 * sc_bustag
149 *
150 * the dma bus tag:
151 * sc_dmatag
152 *
153 * the bus handles:
154 * sc_seb (Shared Ethernet Block registers)
155 * sc_erx (Receiver Unit registers)
156 * sc_etx (Transmitter Unit registers)
157 * sc_mac (MAC registers)
158 * sc_mif (Managment Interface registers)
159 *
160 * the maximum bus burst size:
161 * sc_burst
162 *
163 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
164 * rb_membase, rb_dmabase)
165 *
166 * the local Ethernet address:
167 * sc_enaddr
168 *
169 */
170
171 /* Make sure the chip is stopped. */
172 hme_stop(sc);
173
174
175 /*
176 * Allocate descriptors and buffers
177 * XXX - do all this differently.. and more configurably,
178 * eg. use things as `dma_load_mbuf()' on transmit,
179 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
180 * all the time) on the reveiver side.
181 *
182 * Note: receive buffers must be 64-byte aligned.
183 * Also, apparently, the buffers must extend to a DMA burst
184 * boundary beyond the maximum packet size.
185 */
186 #define _HME_NDESC 32
187 #define _HME_BUFSZ 1600
188
189 /* Note: the # of descriptors must be a multiple of 16 */
190 sc->sc_rb.rb_ntbuf = _HME_NDESC;
191 sc->sc_rb.rb_nrbuf = _HME_NDESC;
192
193 /*
194 * Allocate DMA capable memory
195 * Buffer descriptors must be aligned on a 2048 byte boundary;
196 * take this into account when calculating the size. Note that
197 * the maximum number of descriptors (256) occupies 2048 bytes,
198 * so we allocate that much regardless of _HME_NDESC.
199 */
200 size = 2048 + /* TX descriptors */
201 2048 + /* RX descriptors */
202 sc->sc_rb.rb_ntbuf * _HME_BUFSZ + /* TX buffers */
203 sc->sc_rb.rb_nrbuf * _HME_BUFSZ; /* TX buffers */
204
205 /* Allocate DMA buffer */
206 if ((error = bus_dmamem_alloc(dmatag, size,
207 2048, 0,
208 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
209 printf("%s: DMA buffer alloc error %d\n",
210 sc->sc_dev.dv_xname, error);
211 return;
212 }
213
214 /* Map DMA memory in CPU addressable space */
215 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
216 &sc->sc_rb.rb_membase,
217 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
218 printf("%s: DMA buffer map error %d\n",
219 sc->sc_dev.dv_xname, error);
220 bus_dmamap_unload(dmatag, sc->sc_dmamap);
221 bus_dmamem_free(dmatag, &seg, rseg);
222 return;
223 }
224
225 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
226 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
227 printf("%s: DMA map create error %d\n",
228 sc->sc_dev.dv_xname, error);
229 return;
230 }
231
232 /* Load the buffer */
233 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
234 sc->sc_rb.rb_membase, size, NULL,
235 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
236 printf("%s: DMA buffer map load error %d\n",
237 sc->sc_dev.dv_xname, error);
238 bus_dmamem_free(dmatag, &seg, rseg);
239 return;
240 }
241 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;
242
243 printf(": address %s\n", ether_sprintf(sc->sc_enaddr));
244
245 /* Initialize ifnet structure. */
246 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
247 ifp->if_softc = sc;
248 ifp->if_start = hme_start;
249 ifp->if_ioctl = hme_ioctl;
250 ifp->if_watchdog = hme_watchdog;
251 ifp->if_flags =
252 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
253 IFQ_SET_READY(&ifp->if_snd);
254
255 /* Initialize ifmedia structures and MII info */
256 mii->mii_ifp = ifp;
257 mii->mii_readreg = hme_mii_readreg;
258 mii->mii_writereg = hme_mii_writereg;
259 mii->mii_statchg = hme_mii_statchg;
260
261 ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);
262
263 hme_mifinit(sc);
264
265 mii_attach(&sc->sc_dev, mii, 0xffffffff,
266 MII_PHY_ANY, MII_OFFSET_ANY, 0);
267
268 child = LIST_FIRST(&mii->mii_phys);
269 if (child == NULL) {
270 /* No PHY attached */
271 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
272 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
273 } else {
274 /*
275 * Walk along the list of attached MII devices and
276 * establish an `MII instance' to `phy number'
277 * mapping. We'll use this mapping in media change
278 * requests to determine which phy to use to program
279 * the MIF configuration register.
280 */
281 for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
282 /*
283 * Note: we support just two PHYs: the built-in
284 * internal device and an external on the MII
285 * connector.
286 */
287 if (child->mii_phy > 1 || child->mii_inst > 1) {
288 printf("%s: cannot accomodate MII device %s"
289 " at phy %d, instance %d\n",
290 sc->sc_dev.dv_xname,
291 child->mii_dev.dv_xname,
292 child->mii_phy, child->mii_inst);
293 continue;
294 }
295
296 sc->sc_phys[child->mii_inst] = child->mii_phy;
297 }
298
299 /*
300 * XXX - we can really do the following ONLY if the
301 * phy indeed has the auto negotiation capability!!
302 */
303 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
304 }
305
306 /* claim 802.1q capability */
307 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
308
309 /* Attach the interface. */
310 if_attach(ifp);
311 ether_ifattach(ifp, sc->sc_enaddr);
312
313 sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
314 if (sc->sc_sh == NULL)
315 panic("hme_config: can't establish shutdownhook");
316
317 #if 0
318 printf("%s: %d receive buffers, %d transmit buffers\n",
319 sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
320 sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
321 M_WAITOK);
322 sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
323 M_WAITOK);
324 #endif
325
326 #if NRND > 0
327 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
328 RND_TYPE_NET, 0);
329 #endif
330
331 callout_init(&sc->sc_tick_ch);
332 }
333
334 void
335 hme_tick(arg)
336 void *arg;
337 {
338 struct hme_softc *sc = arg;
339 int s;
340
341 s = splnet();
342 mii_tick(&sc->sc_mii);
343 splx(s);
344
345 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
346 }
347
/*
 * Reset the chip by re-running the full initialization sequence,
 * with network interrupts blocked.
 */
void
hme_reset(sc)
	struct hme_softc *sc;
{
	int spl = splnet();

	hme_init(sc);
	splx(spl);
}
358
359 void
360 hme_stop(sc)
361 struct hme_softc *sc;
362 {
363 bus_space_tag_t t = sc->sc_bustag;
364 bus_space_handle_t seb = sc->sc_seb;
365 int n;
366
367 callout_stop(&sc->sc_tick_ch);
368 mii_down(&sc->sc_mii);
369
370 /* Reset transmitter and receiver */
371 bus_space_write_4(t, seb, HME_SEBI_RESET,
372 (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));
373
374 for (n = 0; n < 20; n++) {
375 u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
376 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
377 return;
378 DELAY(20);
379 }
380
381 printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
382 }
383
/*
 * Carve the single DMA allocation made in hme_config() into its four
 * regions -- TX descriptors, RX descriptors, TX buffers, RX buffers --
 * and initialize both descriptor rings.  The CPU-visible cursor `p' and
 * the device-visible cursor `dma' advance in lockstep.
 */
void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t txbufdma, rxbufdma;
	bus_addr_t dma;		/* device (DMA) address cursor */
	caddr_t p;		/* kernel virtual address cursor */
	unsigned int ntbuf, nrbuf, i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	ntbuf = hr->rb_ntbuf;
	nrbuf = hr->rb_nrbuf;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += ntbuf * HME_XD_SIZE;
	dma += ntbuf * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += nrbuf * HME_XD_SIZE;
	dma += nrbuf * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);


	/*
	 * Allocate transmit buffers
	 */
	hr->rb_txbuf = p;
	txbufdma = dma;
	p += ntbuf * _HME_BUFSZ;
	dma += ntbuf * _HME_BUFSZ;

	/*
	 * Allocate receive buffers
	 */
	hr->rb_rxbuf = p;
	rxbufdma = dma;
	p += nrbuf * _HME_BUFSZ;
	dma += nrbuf * _HME_BUFSZ;

	/*
	 * Initialize transmit buffer descriptors
	 * (flags cleared: the host, not the chip, owns TX descriptors
	 * until a packet is queued in hme_start())
	 */
	for (i = 0; i < ntbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	/*
	 * Initialize receive buffer descriptors
	 * (HME_XD_OWN hands each RX descriptor to the chip immediately)
	 */
	for (i = 0; i < nrbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
	}

	/* Reset the ring bookkeeping indices */
	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
}
460
461 /*
462 * Initialization of interface; set up initialization block
463 * and transmit/receive descriptor rings.
464 */
465 void
466 hme_init(sc)
467 struct hme_softc *sc;
468 {
469 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
470 bus_space_tag_t t = sc->sc_bustag;
471 bus_space_handle_t seb = sc->sc_seb;
472 bus_space_handle_t etx = sc->sc_etx;
473 bus_space_handle_t erx = sc->sc_erx;
474 bus_space_handle_t mac = sc->sc_mac;
475 bus_space_handle_t mif = sc->sc_mif;
476 u_int8_t *ea;
477 u_int32_t v;
478
479 /*
480 * Initialization sequence. The numbered steps below correspond
481 * to the sequence outlined in section 6.3.5.1 in the Ethernet
482 * Channel Engine manual (part of the PCIO manual).
483 * See also the STP2002-STQ document from Sun Microsystems.
484 */
485
486 /* step 1 & 2. Reset the Ethernet Channel */
487 hme_stop(sc);
488
489 /* Re-initialize the MIF */
490 hme_mifinit(sc);
491
492 /* Call MI reset function if any */
493 if (sc->sc_hwreset)
494 (*sc->sc_hwreset)(sc);
495
496 #if 0
497 /* Mask all MIF interrupts, just in case */
498 bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
499 #endif
500
501 /* step 3. Setup data structures in host memory */
502 hme_meminit(sc);
503
504 /* step 4. TX MAC registers & counters */
505 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
506 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
507 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
508 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
509 bus_space_write_4(t, mac, HME_MACI_TXSIZE,
510 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
511 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN :
512 ETHER_MAX_LEN);
513
514 /* Load station MAC address */
515 ea = sc->sc_enaddr;
516 bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
517 bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
518 bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
519
520 /*
521 * Init seed for backoff
522 * (source suggested by manual: low 10 bits of MAC address)
523 */
524 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
525 bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
526
527
528 /* Note: Accepting power-on default for other MAC registers here.. */
529
530
531 /* step 5. RX MAC registers & counters */
532 hme_setladrf(sc);
533
534 /* step 6 & 7. Program Descriptor Ring Base Addresses */
535 bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
536 bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);
537
538 bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
539 bus_space_write_4(t, mac, HME_MACI_RXSIZE,
540 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
541 ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN :
542 ETHER_MAX_LEN);
543
544
545 /* step 8. Global Configuration & Interrupt Mask */
546 bus_space_write_4(t, seb, HME_SEBI_IMASK,
547 ~(
548 /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
549 HME_SEB_STAT_HOSTTOTX |
550 HME_SEB_STAT_RXTOHOST |
551 HME_SEB_STAT_TXALL |
552 HME_SEB_STAT_TXPERR |
553 HME_SEB_STAT_RCNTEXP |
554 HME_SEB_STAT_ALL_ERRORS ));
555
556 switch (sc->sc_burst) {
557 default:
558 v = 0;
559 break;
560 case 16:
561 v = HME_SEB_CFG_BURST16;
562 break;
563 case 32:
564 v = HME_SEB_CFG_BURST32;
565 break;
566 case 64:
567 v = HME_SEB_CFG_BURST64;
568 break;
569 }
570 bus_space_write_4(t, seb, HME_SEBI_CFG, v);
571
572 /* step 9. ETX Configuration: use mostly default values */
573
574 /* Enable DMA */
575 v = bus_space_read_4(t, etx, HME_ETXI_CFG);
576 v |= HME_ETX_CFG_DMAENABLE;
577 bus_space_write_4(t, etx, HME_ETXI_CFG, v);
578
579 /* Transmit Descriptor ring size: in increments of 16 */
580 bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);
581
582
583 /* step 10. ERX Configuration */
584 v = bus_space_read_4(t, erx, HME_ERXI_CFG);
585
586 /* Encode Receive Descriptor ring size: four possible values */
587 switch (_HME_NDESC /*XXX*/) {
588 case 32:
589 v |= HME_ERX_CFG_RINGSIZE32;
590 break;
591 case 64:
592 v |= HME_ERX_CFG_RINGSIZE64;
593 break;
594 case 128:
595 v |= HME_ERX_CFG_RINGSIZE128;
596 break;
597 case 256:
598 v |= HME_ERX_CFG_RINGSIZE256;
599 break;
600 default:
601 printf("hme: invalid Receive Descriptor ring size\n");
602 break;
603 }
604
605 /* Enable DMA */
606 v |= HME_ERX_CFG_DMAENABLE;
607 bus_space_write_4(t, erx, HME_ERXI_CFG, v);
608
609 /* step 11. XIF Configuration */
610 v = bus_space_read_4(t, mac, HME_MACI_XIF);
611 v |= HME_MAC_XIF_OE;
612 /* If an external transceiver is connected, enable its MII drivers */
613 if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
614 v |= HME_MAC_XIF_MIIENABLE;
615 bus_space_write_4(t, mac, HME_MACI_XIF, v);
616
617
618 /* step 12. RX_MAC Configuration Register */
619 v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
620 v |= HME_MAC_RXCFG_ENABLE;
621 bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
622
623 /* step 13. TX_MAC Configuration Register */
624 v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
625 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
626 bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
627
628 /* step 14. Issue Transmit Pending command */
629
630 /* Call MI initialization function if any */
631 if (sc->sc_hwinit)
632 (*sc->sc_hwinit)(sc);
633
634 /* Start the one second timer. */
635 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
636
637 ifp->if_flags |= IFF_RUNNING;
638 ifp->if_flags &= ~IFF_OACTIVE;
639 ifp->if_timer = 0;
640 hme_start(ifp);
641 }
642
643 /*
644 * Compare two Ether/802 addresses for equality, inlined and unrolled for
645 * speed.
646 */
647 static __inline__ int
648 ether_cmp(a, b)
649 u_char *a, *b;
650 {
651
652 if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
653 a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
654 return (0);
655 return (1);
656 }
657
658
659 /*
660 * Routine to copy from mbuf chain to transmit buffer in
661 * network buffer memory.
662 * Returns the amount of data copied.
663 */
664 int
665 hme_put(sc, ri, m)
666 struct hme_softc *sc;
667 int ri; /* Ring index */
668 struct mbuf *m;
669 {
670 struct mbuf *n;
671 int len, tlen = 0;
672 caddr_t bp;
673
674 bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
675 for (; m; m = n) {
676 len = m->m_len;
677 if (len == 0) {
678 MFREE(m, n);
679 continue;
680 }
681 bcopy(mtod(m, caddr_t), bp, len);
682 bp += len;
683 tlen += len;
684 MFREE(m, n);
685 }
686 return (tlen);
687 }
688
689 /*
690 * Pull data off an interface.
691 * Len is length of data, with local net header stripped.
692 * We copy the data into mbufs. When full cluster sized units are present
693 * we copy into clusters.
694 */
695 struct mbuf *
696 hme_get(sc, ri, totlen)
697 struct hme_softc *sc;
698 int ri, totlen;
699 {
700 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
701 struct mbuf *m, *m0, *newm;
702 caddr_t bp;
703 int len;
704
705 MGETHDR(m0, M_DONTWAIT, MT_DATA);
706 if (m0 == 0)
707 return (0);
708 m0->m_pkthdr.rcvif = ifp;
709 m0->m_pkthdr.len = totlen;
710 len = MHLEN;
711 m = m0;
712
713 bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;
714
715 while (totlen > 0) {
716 if (totlen >= MINCLSIZE) {
717 MCLGET(m, M_DONTWAIT);
718 if ((m->m_flags & M_EXT) == 0)
719 goto bad;
720 len = MCLBYTES;
721 }
722
723 if (m == m0) {
724 caddr_t newdata = (caddr_t)
725 ALIGN(m->m_data + sizeof(struct ether_header)) -
726 sizeof(struct ether_header);
727 len -= newdata - m->m_data;
728 m->m_data = newdata;
729 }
730
731 m->m_len = len = min(totlen, len);
732 bcopy(bp, mtod(m, caddr_t), len);
733 bp += len;
734
735 totlen -= len;
736 if (totlen > 0) {
737 MGET(newm, M_DONTWAIT, MT_DATA);
738 if (newm == 0)
739 goto bad;
740 len = MLEN;
741 m = m->m_next = newm;
742 }
743 }
744
745 return (m0);
746
747 bad:
748 m_freem(m0);
749 return (0);
750 }
751
752 /*
753 * Pass a packet to the higher levels.
754 */
755 void
756 hme_read(sc, ix, len)
757 struct hme_softc *sc;
758 int ix, len;
759 {
760 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
761 struct mbuf *m;
762
763 if (len <= sizeof(struct ether_header) ||
764 len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
765 ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
766 ETHERMTU + sizeof(struct ether_header))) {
767 #ifdef HMEDEBUG
768 printf("%s: invalid packet size %d; dropping\n",
769 sc->sc_dev.dv_xname, len);
770 #endif
771 ifp->if_ierrors++;
772 return;
773 }
774
775 /* Pull packet off interface. */
776 m = hme_get(sc, ix, len);
777 if (m == 0) {
778 ifp->if_ierrors++;
779 return;
780 }
781
782 ifp->if_ipackets++;
783
784 #if NBPFILTER > 0
785 /*
786 * Check if there's a BPF listener on this interface.
787 * If so, hand off the raw packet to BPF.
788 */
789 if (ifp->if_bpf)
790 bpf_mtap(ifp->if_bpf, m);
791 #endif
792
793 /* Pass the packet up. */
794 (*ifp->if_input)(ifp, m);
795 }
796
/*
 * ifnet start routine: drain the interface output queue, copying each
 * packet into a TX ring buffer and handing its descriptor to the chip.
 * Stops (sets IFF_OACTIVE) when the ring fills up.
 */
void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	caddr_t txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int ri, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	/* Bail out unless running and not already active */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	ri = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 * (setting HME_XD_OWN hands the descriptor to the chip;
		 * SOP/EOP: each packet occupies exactly one descriptor)
		 */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
				HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
				HME_XD_ENCODE_TSIZE(len));

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
				  HME_ETX_TP_DMAWAKEUP);

		/* Advance ring index, wrapping at the ring size */
		if (++ri == ntbuf)
			ri = 0;

		/* Ring full: mark output active and stop queueing */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = ri;
}
853
854 /*
855 * Transmit interrupt.
856 */
857 int
858 hme_tint(sc)
859 struct hme_softc *sc;
860 {
861 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
862 bus_space_tag_t t = sc->sc_bustag;
863 bus_space_handle_t mac = sc->sc_mac;
864 unsigned int ri, txflags;
865
866 /*
867 * Unload collision counters
868 */
869 ifp->if_collisions +=
870 bus_space_read_4(t, mac, HME_MACI_NCCNT) +
871 bus_space_read_4(t, mac, HME_MACI_FCCNT) +
872 bus_space_read_4(t, mac, HME_MACI_EXCNT) +
873 bus_space_read_4(t, mac, HME_MACI_LTCNT);
874
875 /*
876 * then clear the hardware counters.
877 */
878 bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
879 bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
880 bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
881 bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
882
883 /* Fetch current position in the transmit ring */
884 ri = sc->sc_rb.rb_tdtail;
885
886 for (;;) {
887 if (sc->sc_rb.rb_td_nbusy <= 0)
888 break;
889
890 txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
891
892 if (txflags & HME_XD_OWN)
893 break;
894
895 ifp->if_flags &= ~IFF_OACTIVE;
896 ifp->if_opackets++;
897
898 if (++ri == sc->sc_rb.rb_ntbuf)
899 ri = 0;
900
901 --sc->sc_rb.rb_td_nbusy;
902 }
903
904 /* Update ring */
905 sc->sc_rb.rb_tdtail = ri;
906
907 hme_start(ifp);
908
909 if (sc->sc_rb.rb_td_nbusy == 0)
910 ifp->if_timer = 0;
911
912 return (1);
913 }
914
915 /*
916 * Receive interrupt.
917 */
918 int
919 hme_rint(sc)
920 struct hme_softc *sc;
921 {
922 caddr_t xdr = sc->sc_rb.rb_rxd;
923 unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
924 unsigned int ri, len;
925 u_int32_t flags;
926
927 ri = sc->sc_rb.rb_rdtail;
928
929 /*
930 * Process all buffers with valid data.
931 */
932 for (;;) {
933 flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
934 if (flags & HME_XD_OWN)
935 break;
936
937 if (flags & HME_XD_OFL) {
938 printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
939 sc->sc_dev.dv_xname, ri, flags);
940 } else {
941 len = HME_XD_DECODE_RSIZE(flags);
942 hme_read(sc, ri, len);
943 }
944
945 /* This buffer can be used by the hardware again */
946 HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
947 HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
948
949 if (++ri == nrbuf)
950 ri = 0;
951 }
952
953 sc->sc_rb.rb_rdtail = ri;
954
955 return (1);
956 }
957
958 int
959 hme_eint(sc, status)
960 struct hme_softc *sc;
961 u_int status;
962 {
963 char bits[128];
964
965 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
966 printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
967 return (1);
968 }
969
970 printf("%s: status=%s\n", sc->sc_dev.dv_xname,
971 bitmask_snprintf(status, HME_SEB_STAT_BITS, bits,sizeof(bits)));
972 return (1);
973 }
974
975 int
976 hme_intr(v)
977 void *v;
978 {
979 struct hme_softc *sc = (struct hme_softc *)v;
980 bus_space_tag_t t = sc->sc_bustag;
981 bus_space_handle_t seb = sc->sc_seb;
982 u_int32_t status;
983 int r = 0;
984
985 status = bus_space_read_4(t, seb, HME_SEBI_STAT);
986
987 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
988 r |= hme_eint(sc, status);
989
990 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
991 r |= hme_tint(sc);
992
993 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
994 r |= hme_rint(sc);
995
996 return (r);
997 }
998
999
1000 void
1001 hme_watchdog(ifp)
1002 struct ifnet *ifp;
1003 {
1004 struct hme_softc *sc = ifp->if_softc;
1005
1006 log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1007 ++ifp->if_oerrors;
1008
1009 hme_reset(sc);
1010 }
1011
1012 /*
1013 * Initialize the MII Management Interface
1014 */
1015 void
1016 hme_mifinit(sc)
1017 struct hme_softc *sc;
1018 {
1019 bus_space_tag_t t = sc->sc_bustag;
1020 bus_space_handle_t mif = sc->sc_mif;
1021 u_int32_t v;
1022
1023 /* Configure the MIF in frame mode */
1024 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1025 v &= ~HME_MIF_CFG_BBMODE;
1026 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1027 }
1028
1029 /*
1030 * MII interface
1031 */
1032 static int
1033 hme_mii_readreg(self, phy, reg)
1034 struct device *self;
1035 int phy, reg;
1036 {
1037 struct hme_softc *sc = (void *)self;
1038 bus_space_tag_t t = sc->sc_bustag;
1039 bus_space_handle_t mif = sc->sc_mif;
1040 int n;
1041 u_int32_t v;
1042
1043 /* Select the desired PHY in the MIF configuration register */
1044 v = bus_space_read_4(t, mif, HME_MIFI_CFG);
1045 /* Clear PHY select bit */
1046 v &= ~HME_MIF_CFG_PHY;
1047 if (phy == HME_PHYAD_EXTERNAL)
1048 /* Set PHY select bit to get at external device */
1049 v |= HME_MIF_CFG_PHY;
1050 bus_space_write_4(t, mif, HME_MIFI_CFG, v);
1051
1052 /* Construct the frame command */
1053 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1054 HME_MIF_FO_TAMSB |
1055 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1056 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1057 (reg << HME_MIF_FO_REGAD_SHIFT);
1058
1059 bus_space_write_4(t, mif, HME_MIFI_FO, v);
1060 for (n = 0; n < 100; n++) {
1061 DELAY(1);
1062 v = bus_space_read_4(t, mif, HME_MIFI_FO);
1063 if (v & HME_MIF_FO_TALSB)
1064 return (v & HME_MIF_FO_DATA);
1065 }
1066
1067 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
1068 return (0);
1069 }
1070
/*
 * Write a PHY register via a MIF frame command.
 * Mirrors hme_mii_readreg(): select the PHY, issue the write frame
 * with the data in the low 16 bits, and poll for completion.
 */
static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	/* Poll for frame completion: up to 100 x 1us */
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}
1109
/*
 * MII status-change callback: the negotiated media changed.  Propagate
 * the change to the hardware by re-selecting the active PHY in the MIF,
 * updating the MAC full-duplex setting, and enabling the external MII
 * drivers when the external PHY is in use.
 */
static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	/* Map the current media instance back to its PHY address */
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}
1149
1150 int
1151 hme_mediachange(ifp)
1152 struct ifnet *ifp;
1153 {
1154 struct hme_softc *sc = ifp->if_softc;
1155
1156 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
1157 return (EINVAL);
1158
1159 return (mii_mediachg(&sc->sc_mii));
1160 }
1161
1162 void
1163 hme_mediastatus(ifp, ifmr)
1164 struct ifnet *ifp;
1165 struct ifmediareq *ifmr;
1166 {
1167 struct hme_softc *sc = ifp->if_softc;
1168
1169 if ((ifp->if_flags & IFF_UP) == 0)
1170 return;
1171
1172 mii_pollstat(&sc->sc_mii);
1173 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1174 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1175 }
1176
1177 /*
1178 * Process an ioctl request.
1179 */
/*
 * Handle an interface ioctl for the hme device.
 *
 * Returns 0 on success or an errno value.  All work is done at
 * splnet() to keep network interrupts from racing reconfiguration.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	/* `data' is interpreted per-command; both views are prepared here. */
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		/* Assigning an address implicitly marks the interface up. */
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			/* (Re)initialize the chip, then announce via ARP. */
			hme_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/*
			 * XNS: if no host part was given, adopt our
			 * link-level address; otherwise program the
			 * given host part as our MAC address.
			 */
			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				bcopy(ina->x_host.c_host,
				    LLADDR(ifp->if_sadl),
				    sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			hme_init(sc);
			break;
		    }
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*hme_stop(sc);*/
			hme_init(sc);
		}
#ifdef HMEDEBUG
		/* Mirror IFF_DEBUG into the softc debug flag. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media ioctls are delegated to the ifmedia framework. */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
1287
/*
 * Shutdown hook: quiesce the chip before the system goes down.
 * `arg' is the softc registered at attach time.
 */
void
hme_shutdown(void *arg)
{
	struct hme_softc *sc = arg;

	hme_stop(sc);
}
1295
1296 /*
1297 * Set up the logical address filter.
1298 */
/*
 * Program the MAC's receive filter: promiscuous mode, or the 64-bit
 * multicast hash table built from the current multicast list.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_char *cp;
	u_int32_t crc;
	/* Four 16-bit hash words; only the low 16 bits of each are used. */
	u_int32_t hash[4];
	u_int32_t v;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The high order bit
	 * selects the word, while the rest of the bits select the bit within
	 * the word.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			/* 0xffff fills each 16-bit hash word completely. */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		/* Bitwise little-endian CRC-32 over the 6-byte address. */
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		/* Top 2 CRC bits pick the word, low 4 pick the bit. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}
1393
1394 /*
1395 * Routines for accessing the transmit and receive buffers.
1396 * The various CPU and adapter configurations supported by this
1397 * driver require three different access methods for buffers
1398 * and descriptors:
1399 * (1) contig (contiguous data; no padding),
1400 * (2) gap2 (two bytes of data followed by two bytes of padding),
1401 * (3) gap16 (16 bytes of data followed by 16 bytes of padding).
1402 */
1403
#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

/*
 * Copy `len' bytes at `from' into the transmit buffer for ring slot `ri'.
 */
void
hme_copytobuf_contig(sc, from, ri, len)
	struct hme_softc *sc;
	void *from;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(from, buf, len);
}

/*
 * Copy `len' bytes from the receive buffer for ring slot `ri' to `to'.
 *
 * NOTE(review): the parameter was previously declared `boff' while the
 * body indexed with the undeclared name `ri'; this would not have
 * compiled had the #if 0 been removed.  Renamed to `ri' to match
 * hme_copytobuf_contig() and the body's usage.
 */
void
hme_copyfrombuf_contig(sc, to, ri, len)
	struct hme_softc *sc;
	void *to;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(buf, to, len);
}
#endif
1439