/*	$NetBSD: hme.c,v 1.18 2000/11/15 01:02:16 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#define HMEDEBUG

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

void		hme_start __P((struct ifnet *));
void		hme_stop __P((struct hme_softc *));
int		hme_ioctl __P((struct ifnet *, u_long, caddr_t));
void		hme_tick __P((void *));
void		hme_watchdog __P((struct ifnet *));
void		hme_shutdown __P((void *));
void		hme_init __P((struct hme_softc *));
void		hme_meminit __P((struct hme_softc *));
void		hme_mifinit __P((struct hme_softc *));
void		hme_reset __P((struct hme_softc *));
void		hme_setladrf __P((struct hme_softc *));

/* MII methods & callbacks */
static int	hme_mii_readreg __P((struct device *, int, int));
static void	hme_mii_writereg __P((struct device *, int, int, int));
static void	hme_mii_statchg __P((struct device *));

int		hme_mediachange __P((struct ifnet *));
void		hme_mediastatus __P((struct ifnet *, struct ifmediareq *));

struct mbuf	*hme_get __P((struct hme_softc *, int, int));
int		hme_put __P((struct hme_softc *, int, struct mbuf *));
void		hme_read __P((struct hme_softc *, int, int));
int		hme_eint __P((struct hme_softc *, u_int));
int		hme_rint __P((struct hme_softc *));
int		hme_tint __P((struct hme_softc *));

static int	ether_cmp __P((u_char *, u_char *));

/* Default buffer copy routines */
void	hme_copytobuf_contig __P((struct hme_softc *, void *, int, int));
void	hme_copyfrombuf_contig __P((struct hme_softc *, void *, int, int));
void	hme_zerobuf_contig __P((struct hme_softc *, int, int));


void
hme_config(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the dma bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * (not yet: DMA-capable memory for the ring descriptors & packet
	 * buffers: rb_membase, rb_dmabase)
	 *
	 * the local Ethernet address:
	 *	sc_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);


	/*
	 * Allocate descriptors and buffers
	 * XXX - do all this differently.. and more configurably,
	 * e.g. use things like `dma_load_mbuf()' on transmit,
	 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
	 * all the time) on the receiver side.
	 *
	 * Note: receive buffers must be 64-byte aligned.
	 * Also, apparently, the buffers must extend to a DMA burst
	 * boundary beyond the maximum packet size.
	 */
#define _HME_NDESC	32
#define _HME_BUFSZ	1600
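
/*
 * _HME_BUFSZ is a multiple of 64 (1600 == 25 * 64), so buffers carved
 * back-to-back from a 64-byte aligned block all retain the required
 * 64-byte alignment, while still leaving headroom beyond the 1518-byte
 * maximum Ethernet frame for the burst-boundary slack noted above.
 */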

	/* Note: the # of descriptors must be a multiple of 16 */
	sc->sc_rb.rb_ntbuf = _HME_NDESC;
	sc->sc_rb.rb_nrbuf = _HME_NDESC;

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of _HME_NDESC.
	 */
	size =	2048 +					/* TX descriptors */
		2048 +					/* RX descriptors */
		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */
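
	/*
	 * For the defaults _HME_NDESC == 32 and _HME_BUFSZ == 1600 this
	 * works out to 2048 + 2048 + 32*1600 + 32*1600 = 106496 bytes.
	 */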

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size,
				      2048, 0,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
			sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
				       BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
			sc->sc_dev.dv_xname, error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(": address %s\n", ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
			MII_PHY_ANY, MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				       " at phy %d, instance %d\n",
				       sc->sc_dev.dv_xname,
				       child->mii_dev.dv_xname,
				       child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

#if 0
	printf("%s: %d receive buffers, %d transmit buffers\n",
	    sc->sc_dev.dv_xname, sc->sc_nrbuf, sc->sc_ntbuf);
	sc->sc_rbufaddr = malloc(sc->sc_nrbuf * sizeof(int), M_DEVBUF,
					M_WAITOK);
	sc->sc_tbufaddr = malloc(sc->sc_ntbuf * sizeof(int), M_DEVBUF,
					M_WAITOK);
#endif

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
			  RND_TYPE_NET, 0);
#endif

	callout_init(&sc->sc_tick_ch);
}
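
/*
 * Illustrative sketch (not compiled, not part of the original file):
 * roughly what a bus front-end is expected to set up before calling
 * hme_config(), per the list at the top of that function.  The attach
 * routine name, register offsets and sizes below are hypothetical;
 * real front-ends obtain them from their bus resources.
 */
#if 0
void
hme_example_attach(sc, bt, dt, bh)
	struct hme_softc *sc;
	bus_space_tag_t bt;
	bus_dma_tag_t dt;
	bus_space_handle_t bh;	/* handle covering all chip registers */
{
	sc->sc_bustag = bt;
	sc->sc_dmatag = dt;

	/* Carve the individual register banks out of the mapping */
	bus_space_subregion(bt, bh, 0x0000, 0x1000, &sc->sc_seb);
	bus_space_subregion(bt, bh, 0x2000, 0x1000, &sc->sc_etx);
	bus_space_subregion(bt, bh, 0x4000, 0x1000, &sc->sc_erx);
	bus_space_subregion(bt, bh, 0x6000, 0x1000, &sc->sc_mac);
	bus_space_subregion(bt, bh, 0x7000, 0x1000, &sc->sc_mif);

	sc->sc_burst = 16;		/* maximum bus burst size */
	/* ... fill in sc_enaddr from the bus-specific source ... */

	hme_config(sc);
}
#endif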

void
hme_tick(arg)
	void *arg;
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

void
hme_reset(sc)
	struct hme_softc *sc;
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

void
hme_stop(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
			  (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
}

void
hme_meminit(sc)
	struct hme_softc *sc;
{
	bus_addr_t txbufdma, rxbufdma;
	bus_addr_t dma;
	caddr_t p;
	unsigned int ntbuf, nrbuf, i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	ntbuf = hr->rb_ntbuf;
	nrbuf = hr->rb_nrbuf;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += ntbuf * HME_XD_SIZE;
	dma += ntbuf * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte
	 * boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += nrbuf * HME_XD_SIZE;
	dma += nrbuf * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);


	/*
	 * Allocate transmit buffers
	 */
	hr->rb_txbuf = p;
	txbufdma = dma;
	p += ntbuf * _HME_BUFSZ;
	dma += ntbuf * _HME_BUFSZ;

	/*
	 * Allocate receive buffers
	 */
	hr->rb_rxbuf = p;
	rxbufdma = dma;
	p += nrbuf * _HME_BUFSZ;
	dma += nrbuf * _HME_BUFSZ;

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < ntbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i,
		    txbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < nrbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i,
		    rxbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
	}

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
}
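
/*
 * Resulting layout of the DMA area carved up above (offsets assume the
 * default _HME_NDESC == 32; HME_XD_SIZE is the per-descriptor size, and
 * each descriptor region is rounded up to the next 2048-byte boundary):
 *
 *	rb_txddma:	TX descriptors	(padded to 2048 bytes)
 *	rb_rxddma:	RX descriptors	(padded to 2048 bytes)
 *	txbufdma:	32 TX buffers of _HME_BUFSZ (1600) bytes each
 *	rxbufdma:	32 RX buffers of _HME_BUFSZ (1600) bytes each
 */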

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
void
hme_init(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	bus_space_handle_t mif = sc->sc_mif;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);


	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
			~(
			  /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
			  HME_SEB_STAT_HOSTTOTX |
			  HME_SEB_STAT_RXTOHOST |
			  HME_SEB_STAT_TXALL |
			  HME_SEB_STAT_TXPERR |
			  HME_SEB_STAT_RCNTEXP |
			  HME_SEB_STAT_ALL_ERRORS ));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);
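
	/*
	 * The value written above is the ring size encoded as
	 * (descriptor count / 16) - 1; with _HME_NDESC == 32 that is 1.
	 */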


	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	switch (_HME_NDESC /*XXX*/) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE;
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	/* If an external transceiver is connected, enable its MII drivers */
	if ((bus_space_read_4(t, mif, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);


	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	hme_start(ifp);
}

/*
 * Compare two Ether/802 addresses for equality, inlined and unrolled for
 * speed.
 */
static __inline__ int
ether_cmp(a, b)
	u_char *a, *b;
{

	if (a[5] != b[5] || a[4] != b[4] || a[3] != b[3] ||
	    a[2] != b[2] || a[1] != b[1] || a[0] != b[0])
		return (0);
	return (1);
}
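
/*
 * N.B.: unlike memcmp(), ether_cmp() returns 1 when the two addresses
 * are equal and 0 when they differ; callers (see the multicast range
 * check in hme_setladrf() below) must test for the appropriate sense.
 */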


/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 * Returns the amount of data copied.
 */
int
hme_put(sc, ri, m)
	struct hme_softc *sc;
	int ri;			/* Ring index */
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0;
	caddr_t bp;

	bp = sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp, len);
		bp += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present
 * we copy into clusters.
 */
struct mbuf *
hme_get(sc, ri, totlen)
	struct hme_softc *sc;
	int ri, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	caddr_t bp;
	int len;

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	bp = sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

		if (m == m0) {
			caddr_t newdata = (caddr_t)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		bcopy(bp, mtod(m, caddr_t), len);
		bp += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

	return (m0);

bad:
	m_freem(m0);
	return (0);
}

/*
 * Pass a packet to the higher levels.
 */
void
hme_read(sc, ix, len)
	struct hme_softc *sc;
	int ix, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
#ifdef HMEDEBUG
		printf("%s: invalid packet size %d; dropping\n",
		    sc->sc_dev.dv_xname, len);
#endif
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = hme_get(sc, ix, len);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

void
hme_start(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	caddr_t txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int ri, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	ri = sc->sc_rb.rb_tdhead;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
			HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
			HME_XD_ENCODE_TSIZE(len));

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
				  HME_ETX_TP_DMAWAKEUP);

		if (++ri == ntbuf)
			ri = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = ri;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
		bus_space_read_4(t, mac, HME_MACI_NCCNT) +
		bus_space_read_4(t, mac, HME_MACI_FCCNT) +
		bus_space_read_4(t, mac, HME_MACI_EXCNT) +
		bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/* Fetch current position in the transmit ring */
	ri = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++ri == sc->sc_rb.rb_ntbuf)
			ri = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(sc)
	struct hme_softc *sc;
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
					sc->sc_dev.dv_xname, ri, flags);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len);
		}

		/* This buffer can be used by the hardware again */
		HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
				HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));

		if (++ri == nrbuf)
			ri = 0;
	}

	sc->sc_rb.rb_rdtail = ri;

	return (1);
}

int
hme_eint(sc, status)
	struct hme_softc *sc;
	u_int status;
{
	char bits[128];

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		return (1);
	}

	printf("%s: status=%s\n", sc->sc_dev.dv_xname,
		bitmask_snprintf(status, HME_SEB_STAT_BITS, bits,
		    sizeof(bits)));
	return (1);
}

int
hme_intr(v)
	void *v;
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}


void
hme_watchdog(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(sc)
	struct hme_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	u_int32_t v;

	/* Configure the MIF in frame mode */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
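
	/*
	 * The word just built follows the IEEE 802.3 clause 22 MII
	 * management frame format: start-of-frame and opcode bits,
	 * followed by the 5-bit PHY address and 5-bit register address;
	 * after the turnaround bits the PHY supplies the 16 data bits,
	 * which appear in HME_MIF_FO_DATA once HME_MIF_FO_TALSB is set.
	 */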

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname);
	return (0);
}

static void
hme_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return;
	}

	printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname);
}

static void
hme_mii_statchg(dev)
	struct device *dev;
{
	struct hme_softc *sc = (void *)dev;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

int
hme_mediachange(ifp)
	struct ifnet *ifp;
{
	struct hme_softc *sc = ifp->if_softc;

	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (mii_mediachg(&sc->sc_mii));
}

void
hme_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
hme_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			hme_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else {
				bcopy(ina->x_host.c_host,
				    LLADDR(ifp->if_sadl),
				    sizeof(sc->sc_enaddr));
			}
			/* Set new address. */
			hme_init(sc);
			break;
		    }
#endif
		default:
			hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			/*hme_stop(sc);*/
			hme_init(sc);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

void
hme_shutdown(arg)
	void *arg;
{

	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
void
hme_setladrf(sc)
	struct hme_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_char *cp;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t v;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The high order two
	 * bits select the word, while the remaining four bits select the bit
	 * within the word.
	 */
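
	/*
	 * Worked example: if the 6-bit index computed below is 0x2c
	 * (binary 101100), then crc >> 4 == 2 and crc & 0xf == 12, so
	 * bit 12 of hash[2] (register HME_MACI_HASHTAB2) gets set.
	 */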

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/*
		 * Note: ether_cmp() returns 1 on equality, so negate it
		 * to detect a distinct addrlo/addrhi pair.
		 */
		if (!ether_cmp(enm->enm_addrlo, enm->enm_addrhi)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}

/*
 * Routines for accessing the transmit and receive buffers.
 * The various CPU and adapter configurations supported by this
 * driver require three different access methods for buffers
 * and descriptors:
 *	(1) contig (contiguous data; no padding),
 *	(2) gap2 (two bytes of data followed by two bytes of padding),
 *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
 */

#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

void
hme_copytobuf_contig(sc, from, ri, len)
	struct hme_softc *sc;
	void *from;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(from, buf, len);
}

/*
 * Note: the second parameter was declared `boff' while the body used
 * `ri'; it is renamed here to match the companion routine above.
 */
void
hme_copyfrombuf_contig(sc, to, ri, len)
	struct hme_softc *sc;
	void *to;
	int ri, len;
{
	volatile caddr_t buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call bcopy() to do the work.
	 */
	bcopy(buf, to, len);
}
#endif
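
/*
 * Hedged sketch (not compiled, and not part of the original file):
 * what a gap2-style copy routine could look like, assuming "two bytes
 * of data followed by two bytes of padding" as described above.  The
 * function name, byte packing and use of rb_txbuf are illustrative,
 * modeled on the LANCE driver's gap2 helpers; the HME hardware may
 * differ.
 */
#if 0
void
hme_copytobuf_gap2(sc, from, ri, len)
	struct hme_softc *sc;
	void *from;
	int ri, len;
{
	u_char *src = from;
	volatile u_int16_t *bptr = (volatile u_int16_t *)
	    (sc->sc_rb.rb_txbuf + 2 * ri * _HME_BUFSZ);

	while (len > 1) {
		/* store two data bytes ... */
		*bptr = (src[1] << 8) | (src[0] & 0xff);
		/* ... then skip the two pad bytes */
		bptr += 2;
		src += 2;
		len -= 2;
	}
	if (len == 1)
		*bptr = (u_int16_t)*src;
}
#endif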