/*	$NetBSD: hme.c,v 1.20.2.2 2001/10/22 20:41:18 nathanw Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#define HMEDEBUG

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

void		hme_start __P((struct ifnet *));
void		hme_stop __P((struct hme_softc *));
int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
void		hme_tick __P((void *));
void		hme_watchdog __P((struct ifnet *));
void		hme_shutdown __P((void *));
void		hme_init __P((struct hme_softc *));
void		hme_meminit __P((struct hme_softc *));
void		hme_mifinit __P((struct hme_softc *));
void		hme_reset __P((struct hme_softc *));
void		hme_setladrf __P((struct hme_softc *));

/* MII methods & callbacks */
static int	hme_mii_readreg __P((struct device *, int, int));
static void	hme_mii_writereg __P((struct device *, int, int, int));
static void	hme_mii_statchg __P((struct device *));

int		hme_mediachange __P((struct ifnet *));
void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));

struct mbuf	*hme_get __P((struct hme_softc *, int, int));
int		hme_put __P((struct hme_softc *, int, struct mbuf *));
void		hme_read __P((struct hme_softc *, int, int));
int		hme_eint __P((struct hme_softc *, u_int));
int		hme_rint __P((struct hme_softc *));
int		hme_tint __P((struct hme_softc *));

static int	ether_cmp __P((u_char *, u_char *));

/* Default buffer copy routines */
void	hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
void	hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
void	hme_zerobuf_contig __P((struct hme_softc *, int, int));


void
hme_config(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * (notyet:DMA capable memory for the ring descriptors & packet buffers:
	 *	rb_membase, rb_dmabase)
	 *
	 * the local Ethernet address:
	 *	sc_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);


	/*
	 * Allocate descriptors and buffers
	 * XXX - do all this differently.. and more configurably,
	 *	 eg. use things as `dma_load_mbuf()' on transmit,
	 *	     and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
	 *	     all the time) on the receiver side.
	 *
	 * Note: receive buffers must be 64-byte aligned.
	 * Also, apparently, the buffers must extend to a DMA burst
	 * boundary beyond the maximum packet size.
	 */
#define _HME_NDESC	128
#define _HME_BUFSZ	1600

	/* Note: the # of descriptors must be a multiple of 16 */
	sc->sc_rb.rb_ntbuf = _HME_NDESC;
	sc->sc_rb.rb_nrbuf = _HME_NDESC;

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of _HME_NDESC.
	 */
	size =	2048 +					/* TX descriptors */
		2048 +					/* RX descriptors */
		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */
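
	/*
	 * The resulting DMA area is carved up by hme_meminit(), which
	 * pads each descriptor section out to the next 2048-byte
	 * boundary:
	 *
	 *	rb_dmabase ->	TX descriptors	(2048 bytes)
	 *			RX descriptors	(2048 bytes)
	 *			TX buffers	(ntbuf * _HME_BUFSZ)
	 *			RX buffers	(nrbuf * _HME_BUFSZ)
	 */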

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size,
				      2048, 0,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
			sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
				    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
			MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				       " at phy %d, instance %d\n",
				       sc->sc_dev.dv_xname,
				       child->mii_dev.dv_xname,
				       child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

#if 0
	printf("%s: %d receive buffers, %d transmit buffers\n",
	    sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
	sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
				 M_WAITOK);
	sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
				 M_WAITOK);
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
			  RND_TYPE_NET, 0);
#endif

	callout_init(&sc->sc_tick_ch);
}

void
hme_tick(arg)
	void *arg;
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

void
hme_reset(sc)
	struct hme_softc *sc;
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
			  (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
}

void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t txbufdma, rxbufdma;
	bus_addr_t dma;
	caddr_t p;
	unsigned int ntbuf, nrbuf, i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	ntbuf = hr->rb_ntbuf;
	nrbuf = hr->rb_nrbuf;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += ntbuf * HME_XD_SIZE;
	dma += ntbuf * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte
	   boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += nrbuf * HME_XD_SIZE;
	dma += nrbuf * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);


	/*
	 * Allocate transmit buffers
	 */
	hr->rb_txbuf = p;
	txbufdma = dma;
	p += ntbuf * _HME_BUFSZ;
	dma += ntbuf * _HME_BUFSZ;

	/*
	 * Allocate receive buffers
	 */
	hr->rb_rxbuf = p;
	rxbufdma = dma;
	p += nrbuf * _HME_BUFSZ;
	dma += nrbuf * _HME_BUFSZ;

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < ntbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i,
		    txbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < nrbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i,
		    rxbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
	}

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	bus_space_handle_t mif = sc->sc_mif;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN :
	    ETHER_MAX_LEN);

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN :
	    ETHER_MAX_LEN);


	/* step 8. Global Configuration & Interrupt Mask */
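	/*
	 * Bits set in the interrupt mask disable the corresponding
	 * sources, so writing the complement below leaves enabled
	 * exactly the TX/RX completion and error conditions listed.
	 */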
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
			~(
			  /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
			  HME_SEB_STAT_HOSTTOTX |
			  HME_SEB_STAT_RXTOHOST |
			  HME_SEB_STAT_TXALL |
			  HME_SEB_STAT_TXPERR |
			  HME_SEB_STAT_RCNTEXP |
			  HME_SEB_STAT_ALL_ERRORS ));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);


	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	switch (_HME_NDESC /*XXX*/) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE;
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);


	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and unrolled for
 * speed.
 */
static __inline__ int
ether_cmp(a, b)
	u_char *a, *b;
{

	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
		return (0);
	return (1);
}


/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 * Returns the amount of data copied.
 */
int
hme_put(sc, ri, m)
	struct hme_softc *sc;
	int ri;			/* Ring index */
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0;
	caddr_t bp;

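	/*
	 * Ring slot `ri' owns the fixed _HME_BUFSZ-sized region at the
	 * corresponding offset in the contiguous transmit buffer area
	 * set up by hme_meminit().
	 */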
	bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		memcpy(bp, mtod(m, caddr_t), len);
		bp += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present
 * we copy into clusters.
 */
struct mbuf *
hme_get(sc, ri, totlen)
	struct hme_softc *sc;
	int ri, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	caddr_t bp;
	int len;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

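		/*
		 * On the first mbuf, offset the data pointer so that the
		 * payload behind the 14-byte Ethernet header ends up
		 * long-word aligned for the protocol stack.
		 */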
		if (m == m0) {
			caddr_t newdata = (caddr_t)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, caddr_t), bp, len);
		bp += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return (m0);

bad:
	m_freem(m0);
	return (0);
}

/*
 * Pass a packet to the higher levels.
 */
void
hme_read(sc, ix, len)
	struct hme_softc *sc;
	int ix, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
	    ETHERMTU + sizeof(struct ether_header))) {
#ifdef HMEDEBUG
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
#endif
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = hme_get(sc, ix, len);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	caddr_t txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int ri, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	ri = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
			HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
			HME_XD_ENCODE_TSIZE(len));

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
				  HME_ETX_TP_DMAWAKEUP);

		if (++ri == ntbuf)
			ri = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = ri;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
		bus_space_read_4(t, mac, HME_MACI_NCCNT) +
		bus_space_read_4(t, mac, HME_MACI_FCCNT) +
		bus_space_read_4(t, mac, HME_MACI_EXCNT) +
		bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/* Fetch current position in the transmit ring */
	ri = sc->sc_rb.rb_tdtail;

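	/*
	 * Reclaim transmitted descriptors: the chip clears HME_XD_OWN
	 * in each descriptor it is done with, so stop at the first one
	 * it still owns.
	 */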
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++ri == sc->sc_rb.rb_ntbuf)
			ri = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(sc)
	struct hme_softc *sc;
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
				sc->sc_dev.dv_xname, ri, flags);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len);
		}

		/* This buffer can be used by the hardware again */
		HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));

		if (++ri == nrbuf)
			ri = 0;
	}

	sc->sc_rb.rb_rdtail = ri;

	return (1);
}

int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	char bits[128];

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		return (1);
	}

	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
	    bitmask_snprintf(status, HME_SEB_STAT_BITS, bits, sizeof(bits)));
	return (1);
}

int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}


void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	u_int32_t v;

	/* Configure the MIF in frame mode */
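	/*
	 * In frame mode the MIF shifts MII management frames out in
	 * hardware; in bit-bang mode (HME_MIF_CFG_BBMODE) software
	 * would have to toggle the MDC/MDIO lines directly.
	 */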
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
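	/*
	 * The fields below follow the IEEE 802.3 clause 22 management
	 * frame layout: start bits, opcode, PHY address, register
	 * address, turnaround and 16 data bits.
	 */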
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT)	|
	    HME_MIF_FO_TAMSB				|
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT)	|
	    (phy << HME_MIF_FO_PHYAD_SHIFT)		|
	    (reg << HME_MIF_FO_REGAD_SHIFT)		|
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			hme_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				memcpy(LLADDR(ifp->if_sadl),
				    ina->x_host.c_host, sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			hme_init(sc);
			break;
		    }
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*hme_stop(sc);*/
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{

	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t v;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as
	 * an index into the 64 bit logical address filter.  The two high
	 * order bits select the word, while the remaining four bits select
	 * the bit within the word.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

/*
 * Routines for accessing the transmit and receive buffers.
 * The various CPU and adapter configurations supported by this
 * driver require three different access methods for buffers
 * and descriptors:
 *	(1) contig (contiguous data; no padding),
 *	(2) gap2 (two bytes of data followed by two bytes of padding),
 *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
 */

#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

void
hme_copytobuf_contig(sc, from, ri, len)
	struct hme_softc *sc;
	void *from;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(buf, from, len);
}
void
hme_copyfrombuf_contig(sc, to, ri, len)
	struct hme_softc *sc;
	void *to;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(to, buf, len);
}
#endif