/*	$NetBSD: if_bge.c,v 1.69 2004/04/10 19:23:49 thorpej Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.69 2004/04/10 19:23:49 thorpej Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair. The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703. On other chipsets (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
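
/*
 * A rough worked example of the table above: a saturated gigabit link
 * carrying 1500-byte frames delivers on the order of 81,000 packets/sec.
 * With the most aggressive pair ({ 598, 46 }), batching roughly 46
 * packets per interrupt cuts that to about 81000 / 46 ~= 1760
 * interrupts/sec, versus one interrupt per packet with mitigation
 * disabled. (Illustrative arithmetic only; under light load the
 * rx_ticks timer expires first and determines the actual rate.)
 */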

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

#ifdef __NetBSD__
static int bge_rxthresh_nodenum;
#endif /* __NetBSD__ */

int bge_probe(struct device *, struct cfdata *, void *);
void bge_attach(struct device *, struct device *, void *);
void bge_release_resources(struct bge_softc *);
void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
static __inline int bge_cksum_pad(struct mbuf *pkt);
static __inline int bge_compact_dma_runt(struct mbuf *pkt);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_init(struct ifnet *);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

void bge_setmulti(struct bge_softc *);

void bge_handle_events(struct bge_softc *);
int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

int bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);
int bge_setpowerstate(struct bge_softc *, int);

#ifdef notdef
u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
void bge_vpd_read(struct bge_softc *);
#endif

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
u_int32_t bge_readreg_ind(struct bge_softc *, int);
#endif
void bge_writereg_ind(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);

void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/);
void bge_update_all_threshes(int /*lvl*/);

void bge_dump_status(struct bge_softc *);
void bge_dump_rxbd(struct bge_rx_bd *);

#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
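
/*
 * Note on usage: because DPRINTF()/DPRINTFN() splice their argument
 * straight into a plain printf call, the caller must wrap the printf
 * arguments in a second set of parentheses, e.g.:
 *
 *	DPRINTFN(4, ("%s: entering bge_init\n", sc->bge_dev.dv_xname));
 *
 * The double parentheses stand in for variadic macros, which this
 * era's kernel C dialect does not assume.
 */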

/* Various chip quirks. */
#define BGE_QUIRK_LINK_STATE_BROKEN	0x00000001
#define BGE_QUIRK_CSUM_BROKEN		0x00000002
#define BGE_QUIRK_ONLY_PHY_1		0x00000004
#define BGE_QUIRK_5700_SMALLDMA		0x00000008
#define BGE_QUIRK_5700_PCIX_REG_BUG	0x00000010
#define BGE_QUIRK_PRODUCER_BUG		0x00000020
#define BGE_QUIRK_PCIX_DMA_ALIGN_BUG	0x00000040
#define BGE_QUIRK_5705_CORE		0x00000080
#define BGE_QUIRK_FEWER_MBUFS		0x00000100

/* following bugs are common to bcm5700 rev B, all flavours */
#define BGE_QUIRK_5700_COMMON \
	(BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)

CFATTACH_DECL(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	pcireg_t val;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	return val;
}

void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}
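
/*
 * The two helpers above implement the usual indirect-access idiom: one
 * PCI config write selects a window into NIC-internal memory
 * (BGE_PCI_MEMWIN_BASEADDR), and a second config access moves the data.
 * A minimal, hypothetical use:
 *
 *	u_int32_t v;
 *
 *	v = bge_readmem_ind(sc, BGE_STATS_BLOCK);	/+ read one word +/
 *	bge_writemem_ind(sc, BGE_STATS_BLOCK, 0);	/+ zero it again +/
 *
 * The window base register is shared state; callers must not assume it
 * is preserved across other indirect accesses (bge_blockinit(), for
 * one, resets it to 0).
 */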

#ifdef notdef
u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}
#ifdef notdef
u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	u_int32_t val;
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	/* The loop above counts to BGE_TIMEOUT * 10, so test that bound. */
	if (i >= BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	/* Only vr_len bytes were allocated; reading vr_len + 1 overran. */
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	/*
	 * The loop above counts to BGE_TIMEOUT * 10; report failure to
	 * the caller so bge_read_eeprom() can propagate the error.
	 */
	if (i >= BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}
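
/*
 * Worked example of the byte extraction above: the auto-access
 * interface always returns a full 32-bit word, so for addr = 5 we
 * have addr % 4 = 1 and the wanted byte is bits 15:8 of BGE_EE_DATA,
 * i.e. (byte >> 8) & 0xFF.
 */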

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
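
#ifdef notdef
/*
 * A minimal sketch of how a caller might use bge_read_eeprom(), e.g.
 * to fetch the station address during attach. BGE_EE_MAC_OFFSET is
 * assumed here to be the MAC-address offset defined in if_bgereg.h;
 * the real attach code may obtain the address from NIC memory instead.
 */
static int
bge_get_eaddr_eeprom_example(struct bge_softc *sc, u_int8_t eaddr[])
{

	/* Read ETHER_ADDR_LEN (6) bytes starting past the length field. */
	if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN) != 0)
		return (1);	/* EEPROM read failed or timed out */
	return (0);
}
#endif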

int
bge_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val;
	u_int32_t saved_autopoll;
	int i;

	/*
	 * Several chips with builtin PHYs will incorrectly answer to
	 * other PHY instances than the builtin PHY at id 1.
	 */
	if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t saved_autopoll;
	int i;

	/* Touching the PHY while autopolling is on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		delay(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	}
	if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	}
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/*
	 * For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending. Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);

	return;
}

/*
 * Update Rx thresholds of all bge devices
 */
void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	TAILQ_FOREACH(ifp, &ifnet, if_list) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}
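
/*
 * Illustrative use of the two helpers above (not a call site in this
 * revision): a sysctl handler or debugger session could retune every
 * bge interface at once with something like
 *
 *	bge_rx_thresh_lvl = 3;
 *	bge_update_all_threshes(bge_rx_thresh_lvl);
 *
 * The level is clamped to [0, NBGE_RX_THRESH - 1]; the per-device
 * register update itself is deferred until the driver notices
 * bge_pending_rxintr_change.
 */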

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map DMA buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
void
bge_jfree(m, buf, size, arg)
	struct mbuf *m;
	caddr_t buf;
	size_t size;
	void *arg;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((caddr_t)buf
	    - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m, dmamap)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
	bus_dmamap_t dmamap;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		if (!sc->bge_rx_alignment_bug)
			m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
		    BUS_DMA_READ|BUS_DMA_NOWAIT))
			return(ENOBUFS);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
		if (!sc->bge_rx_alignment_bug)
			m_adj(m_new, ETHER_ALIGN);
	}

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	bge_set_hostaddr(&r->bge_addr,
	    dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}
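
/*
 * A note on the ETHER_ALIGN adjustment above: shifting the packet
 * start by 2 bytes places the 14-byte Ethernet header so that the IP
 * header that follows lands on a 4-byte boundary (14 + 2 = 16).
 * Chips with the RX alignment bug cannot DMA to such offsets, hence
 * the sc->bge_rx_alignment_bug test here and in bge_newbuf_jumbo().
 */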

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return(ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return(0);
}

void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}
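
/*
 * Worked example of the hash placement above: suppose the low 7 CRC
 * bits come out as h = 0x5a. Then (h & 0x60) >> 5 = 2 selects
 * hashes[2] (register BGE_MAR0 + 8), and h & 0x1f = 26 selects bit 26
 * within it, so hashes[2] |= 1 << 26. The four 32-bit MAR registers
 * together form the 128-bit multicast filter.
 */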

const int bge_swapbits[] = {
	0,
	BGE_MODECTL_BYTESWAP_DATA,
	BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
};

int bge_swapindex = 0;

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	u_int32_t cachesize;
	int i;
	u_int32_t dma_rw_ctl;
	struct pci_attach_args *pa = &(sc->bge_pa);

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
		if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
			dma_rw_ctl |= 0x0F;
		}
	} else {
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    sc->bge_dev.dv_xname));
		/* PCI-X bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs, which means we should also clear
		 * the low-order MINDMA bits. In addition, the 5704
		 * uses a different encoding of read/write watermarks.
		 */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    /* should be 0x1f0000 */
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
		else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			dma_rw_ctl &= 0xfffffff0;
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/* Get cache line size. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);

	/*
	 * Avoid violating PCI spec on certain chip revs.
	 */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
	    PCIM_CMD_MWIEN) {
		switch(cachesize) {
		case 1:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_16BYTES);
			break;
		case 2:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_32BYTES);
			break;
		case 4:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_64BYTES);
			break;
		case 8:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_128BYTES);
			break;
		case 16:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_256BYTES);
			break;
		case 32:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_512BYTES);
			break;
		case 64:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_1024BYTES);
			break;
		default:
			/* Disable PCI memory write and invalidate. */
#if 0
			if (bootverbose)
				printf("%s: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->bge_dev.dv_xname, cachesize);
#endif
			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
			    PCIM_CMD_MWIEN);
			break;
		}
	}

	/*
	 * Disable memory write invalidate. Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1 /* BGE_32BITTIME_66MHZ */);

	return(0);
}

int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	volatile struct bge_rcb *rcb;
	bus_size_t rcb_addr;
	int i;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */

	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
	    BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
#ifdef ORIG_WPAUL_VALUES
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
#else
	/* New Broadcom docs strongly recommend these: */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
	}
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
#endif

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_BMAN_MODE,
		    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

		/* Poll for buffer manager start indication */
		for (i = 0; i < BGE_TIMEOUT; i++) {
			if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
				break;
			DELAY(10);
		}

		if (i == BGE_TIMEOUT) {
			printf("%s: buffer manager failed to start\n",
			    sc->bge_dev.dv_xname);
			return(ENXIO);
		}
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	} else {
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	} else {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	}

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		bge_set_hostaddr(&rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_set_hostaddr(&taddr,
		    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
	}

	/* Set up address of status block */
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

#if defined(not_quite_yet)
	/* The Linux driver enables GPIO pin #1 on 5700s */
	if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
		sc->bge_local_ctrl_reg |=
		    (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
	}
#endif
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);

	/* Turn on DMA completion state machine */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
	}

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
	}

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

static const struct bge_revision {
	uint32_t br_chipid;
	uint32_t br_quirks;
	const char *br_name;
} bge_revisions[] = {
	{ BGE_CHIPID_BCM5700_A0,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A0" },

	{ BGE_CHIPID_BCM5700_A1,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "BCM5700 A1" },

	{ BGE_CHIPID_BCM5700_B0,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B0" },

	{ BGE_CHIPID_BCM5700_B1,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B1" },

	{ BGE_CHIPID_BCM5700_B2,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 B2" },

	/* This is treated like a BCM5700 Bx */
	{ BGE_CHIPID_BCM5700_ALTIMA,
	  BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
	  "BCM5700 Altima" },

	{ BGE_CHIPID_BCM5700_C0,
	  0,
	  "BCM5700 C0" },

	{ BGE_CHIPID_BCM5701_A0,
	  0, /* XXX really, just not known */
	  "BCM5701 A0" },

	{ BGE_CHIPID_BCM5701_B0,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B0" },

	{ BGE_CHIPID_BCM5701_B2,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B2" },

	{ BGE_CHIPID_BCM5701_B5,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "BCM5701 B5" },

	{ BGE_CHIPID_BCM5703_A0,
	  0,
	  "BCM5703 A0" },

	{ BGE_CHIPID_BCM5703_A1,
	  0,
	  "BCM5703 A1" },

	{ BGE_CHIPID_BCM5703_A2,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A2" },

	{ BGE_CHIPID_BCM5703_A3,
	  BGE_QUIRK_ONLY_PHY_1,
	  "BCM5703 A3" },

	{ BGE_CHIPID_BCM5704_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A0" },

	{ BGE_CHIPID_BCM5704_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A1" },

	{ BGE_CHIPID_BCM5704_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A2" },

	{ BGE_CHIPID_BCM5704_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
	  "BCM5704 A3" },

	{ BGE_CHIPID_BCM5705_A0,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A0" },

	{ BGE_CHIPID_BCM5705_A1,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A1" },

	{ BGE_CHIPID_BCM5705_A2,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A2" },

	{ BGE_CHIPID_BCM5705_A3,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "BCM5705 A3" },

	{ 0, 0, NULL }
};

/*
 * Some defaults for major revisions, so that newer steppings
 * that we don't know about have a shot at working.
 */
static const struct bge_revision bge_majorrevs[] = {
	{ BGE_ASICREV_BCM5700,
	  BGE_QUIRK_LINK_STATE_BROKEN,
	  "unknown BCM5700" },

	{ BGE_ASICREV_BCM5701,
	  BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
	  "unknown BCM5701" },

	{ BGE_ASICREV_BCM5703,
	  0,
	  "unknown BCM5703" },

	{ BGE_ASICREV_BCM5704,
	  BGE_QUIRK_ONLY_PHY_1,
	  "unknown BCM5704" },

	{ BGE_ASICREV_BCM5705,
	  BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
	  "unknown BCM5705" },

	{ 0,
	  0,
	  NULL }
};
1942
1943
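/*
 * Map a chip ID to its bge_revision entry: exact stepping match
 * first, then fall back to the per-ASIC defaults above.
 */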
1944 static const struct bge_revision *
1945 bge_lookup_rev(uint32_t chipid)
1946 {
1947 const struct bge_revision *br;
1948
1949 for (br = bge_revisions; br->br_name != NULL; br++) {
1950 if (br->br_chipid == chipid)
1951 return (br);
1952 }
1953
1954 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1955 if (br->br_chipid == BGE_ASICREV(chipid))
1956 return (br);
1957 }
1958
1959 return (NULL);
1960 }
1961
1962 static const struct bge_product {
1963 pci_vendor_id_t bp_vendor;
1964 pci_product_id_t bp_product;
1965 const char *bp_name;
1966 } bge_products[] = {
1967 /*
1968 * The BCM5700 documentation seems to indicate that the hardware
1969 * still has the Alteon vendor ID burned into it, though it
1970 * should always be overridden by the value in the EEPROM. We'll
1971 * check for it anyway.
1972 */
1973 { PCI_VENDOR_ALTEON,
1974 PCI_PRODUCT_ALTEON_BCM5700,
1975 "Broadcom BCM5700 Gigabit Ethernet",
1976 },
1977 { PCI_VENDOR_ALTEON,
1978 PCI_PRODUCT_ALTEON_BCM5701,
1979 "Broadcom BCM5701 Gigabit Ethernet",
1980 },
1981
1982 { PCI_VENDOR_ALTIMA,
1983 PCI_PRODUCT_ALTIMA_AC1000,
1984 "Altima AC1000 Gigabit Ethernet",
1985 },
1986 { PCI_VENDOR_ALTIMA,
1987 PCI_PRODUCT_ALTIMA_AC1001,
1988 "Altima AC1001 Gigabit Ethernet",
1989 },
1990 { PCI_VENDOR_ALTIMA,
1991 PCI_PRODUCT_ALTIMA_AC9100,
1992 "Altima AC9100 Gigabit Ethernet",
1993 },
1994
1995 { PCI_VENDOR_BROADCOM,
1996 PCI_PRODUCT_BROADCOM_BCM5700,
1997 "Broadcom BCM5700 Gigabit Ethernet",
1998 },
1999 { PCI_VENDOR_BROADCOM,
2000 PCI_PRODUCT_BROADCOM_BCM5701,
2001 "Broadcom BCM5701 Gigabit Ethernet",
2002 },
2003 { PCI_VENDOR_BROADCOM,
2004 PCI_PRODUCT_BROADCOM_BCM5702,
2005 "Broadcom BCM5702 Gigabit Ethernet",
2006 },
2007 { PCI_VENDOR_BROADCOM,
2008 PCI_PRODUCT_BROADCOM_BCM5702X,
2009 "Broadcom BCM5702X Gigabit Ethernet" },
2010
2011 { PCI_VENDOR_BROADCOM,
2012 PCI_PRODUCT_BROADCOM_BCM5703,
2013 "Broadcom BCM5703 Gigabit Ethernet",
2014 },
2015 { PCI_VENDOR_BROADCOM,
2016 PCI_PRODUCT_BROADCOM_BCM5703X,
2017 "Broadcom BCM5703X Gigabit Ethernet",
2018 },
2019 { PCI_VENDOR_BROADCOM,
2020 PCI_PRODUCT_BROADCOM_BCM5703A3,
2021 "Broadcom BCM5703A3 Gigabit Ethernet",
2022 },
2023
2024 { PCI_VENDOR_BROADCOM,
2025 PCI_PRODUCT_BROADCOM_BCM5704C,
2026 "Broadcom BCM5704C Dual Gigabit Ethernet",
2027 },
2028 { PCI_VENDOR_BROADCOM,
2029 PCI_PRODUCT_BROADCOM_BCM5704S,
2030 "Broadcom BCM5704S Dual Gigabit Ethernet",
2031 },
2032
2033 { PCI_VENDOR_BROADCOM,
2034 PCI_PRODUCT_BROADCOM_BCM5705,
2035 "Broadcom BCM5705 Gigabit Ethernet",
2036 },
2037 { PCI_VENDOR_BROADCOM,
2038 PCI_PRODUCT_BROADCOM_BCM5705_ALT,
2039 "Broadcom BCM5705 Gigabit Ethernet",
2040 },
2041 { PCI_VENDOR_BROADCOM,
2042 PCI_PRODUCT_BROADCOM_BCM5705M,
2043 "Broadcom BCM5705M Gigabit Ethernet",
2044 },
2045
2046 { PCI_VENDOR_BROADCOM,
2047 PCI_PRODUCT_BROADCOM_BCM5901,
2048 "Broadcom BCM5901 Fast Ethernet",
2049 },
2050 { PCI_VENDOR_BROADCOM,
2051 PCI_PRODUCT_BROADCOM_BCM5901A2,
2052 "Broadcom BCM5901A2 Fast Ethernet",
2053 },
2054
2055 { PCI_VENDOR_BROADCOM,
2056 PCI_PRODUCT_BROADCOM_BCM5782,
2057 "Broadcom BCM5782 Gigabit Ethernet",
2058 },
2059
2060 { PCI_VENDOR_SCHNEIDERKOCH,
2061 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2062 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2063 },
2064
2065 { PCI_VENDOR_3COM,
2066 PCI_PRODUCT_3COM_3C996,
2067 "3Com 3c996 Gigabit Ethernet",
2068 },
2069
2070 { 0,
2071 0,
2072 NULL },
2073 };
2074
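/*
 * Match a candidate device against the supported-product table
 * by PCI vendor and product ID.
 */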
2075 static const struct bge_product *
2076 bge_lookup(const struct pci_attach_args *pa)
2077 {
2078 const struct bge_product *bp;
2079
2080 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2081 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2082 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2083 return (bp);
2084 }
2085
2086 return (NULL);
2087 }
2088
2089 int
2090 bge_setpowerstate(sc, powerlevel)
2091 struct bge_softc *sc;
2092 int powerlevel;
2093 {
2094 #ifdef NOTYET
2095 u_int32_t pm_ctl = 0;
2096
2097 /* XXX FIXME: make sure indirect accesses enabled? */
2098 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2099 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2100 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2101
2102 /* clear the PME_assert bit and power state bits, enable PME */
2103 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2104 pm_ctl &= ~PCIM_PSTAT_DMASK;
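	/* Bit 8 of the power management control/status register is PME_EN. */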
2105 pm_ctl |= (1 << 8);
2106
2107 if (powerlevel == 0) {
2108 pm_ctl |= PCIM_PSTAT_D0;
2109 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2110 pm_ctl, 2);
2111 DELAY(10000);
2112 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2113 DELAY(10000);
2114
2115 #ifdef NOTYET
2116 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2117 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2118 #endif
2119 DELAY(40); DELAY(40); DELAY(40);
2120 DELAY(10000); /* above not quite adequate on 5700 */
2121 return 0;
2122 }
2123
2124
	/*
	 * Entering ACPI power states D1-D3 is achieved by wiggling
	 * GMII GPIO pins. The example code assumes all hardware vendors
	 * followed Broadcom's sample PCB layout. Until we verify that
	 * for all supported OEM cards, states D1-D3 are unsupported.
	 */
2131 printf("%s: power state %d unimplemented; check GPIO pins\n",
2132 sc->bge_dev.dv_xname, powerlevel);
2133 #endif
2134 return EOPNOTSUPP;
2135 }
2136
2137
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return 1 if we find a match. Note that the
 * Broadcom controller contains VPD support, so the device name
 * string could in principle be read from the controller itself
 * rather than compiled in; this version simply reports the
 * compiled-in string for the matched entry.
 */
2146 int
2147 bge_probe(parent, match, aux)
2148 struct device *parent;
2149 struct cfdata *match;
2150 void *aux;
2151 {
2152 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2153
2154 if (bge_lookup(pa) != NULL)
2155 return (1);
2156
2157 return (0);
2158 }
2159
2160 void
2161 bge_attach(parent, self, aux)
2162 struct device *parent, *self;
2163 void *aux;
2164 {
2165 struct bge_softc *sc = (struct bge_softc *)self;
2166 struct pci_attach_args *pa = aux;
2167 const struct bge_product *bp;
2168 const struct bge_revision *br;
2169 pci_chipset_tag_t pc = pa->pa_pc;
2170 pci_intr_handle_t ih;
2171 const char *intrstr = NULL;
2172 bus_dma_segment_t seg;
2173 int rseg;
2174 u_int32_t hwcfg = 0;
2175 u_int32_t mac_addr = 0;
2176 u_int32_t command;
2177 struct ifnet *ifp;
2178 caddr_t kva;
2179 u_char eaddr[ETHER_ADDR_LEN];
2180 pcireg_t memtype;
2181 bus_addr_t memaddr;
2182 bus_size_t memsize;
2183 u_int32_t pm_ctl;
2184
2185 bp = bge_lookup(pa);
2186 KASSERT(bp != NULL);
2187
2188 sc->bge_pa = *pa;
2189
2190 aprint_naive(": Ethernet controller\n");
2191 aprint_normal(": %s\n", bp->bp_name);
2192
2193 /*
2194 * Map control/status registers.
2195 */
2196 DPRINTFN(5, ("Map control/status regs\n"));
2197 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2198 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2199 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
2200 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2201
2202 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2203 aprint_error("%s: failed to enable memory mapping!\n",
2204 sc->bge_dev.dv_xname);
2205 return;
2206 }
2207
2208 DPRINTFN(5, ("pci_mem_find\n"));
2209 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2210 switch (memtype) {
2211 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2212 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2213 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2214 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2215 &memaddr, &memsize) == 0)
2216 break;
2217 default:
2218 aprint_error("%s: can't find mem space\n",
2219 sc->bge_dev.dv_xname);
2220 return;
2221 }
2222
2223 DPRINTFN(5, ("pci_intr_map\n"));
2224 if (pci_intr_map(pa, &ih)) {
2225 aprint_error("%s: couldn't map interrupt\n",
2226 sc->bge_dev.dv_xname);
2227 return;
2228 }
2229
2230 DPRINTFN(5, ("pci_intr_string\n"));
2231 intrstr = pci_intr_string(pc, ih);
2232
2233 DPRINTFN(5, ("pci_intr_establish\n"));
2234 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2235
2236 if (sc->bge_intrhand == NULL) {
2237 aprint_error("%s: couldn't establish interrupt",
2238 sc->bge_dev.dv_xname);
2239 if (intrstr != NULL)
2240 aprint_normal(" at %s", intrstr);
2241 aprint_normal("\n");
2242 return;
2243 }
2244 aprint_normal("%s: interrupting at %s\n",
2245 sc->bge_dev.dv_xname, intrstr);
2246
2247 /*
2248 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2249 * can clobber the chip's PCI config-space power control registers,
2250 * leaving the card in D3 powersave state.
2251 * We do not have memory-mapped registers in this state,
2252 * so force device into D0 state before starting initialization.
2253 */
2254 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2255 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* PME_EN | D0 state */
2257 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */
2259
2260 /* Try to reset the chip. */
2261 DPRINTFN(5, ("bge_reset\n"));
2262 bge_reset(sc);
2263
2264 if (bge_chipinit(sc)) {
2265 aprint_error("%s: chip initialization failed\n",
2266 sc->bge_dev.dv_xname);
2267 bge_release_resources(sc);
2268 return;
2269 }
2270
2271 /*
2272 * Get station address from the EEPROM.
2273 */
2274 mac_addr = bge_readmem_ind(sc, 0x0c14);
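	/*
	 * 0x484b is ASCII "HK"; it apparently marks a valid station
	 * address stored in NIC memory.
	 */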
2275 if ((mac_addr >> 16) == 0x484b) {
2276 eaddr[0] = (u_char)(mac_addr >> 8);
2277 eaddr[1] = (u_char)(mac_addr >> 0);
2278 mac_addr = bge_readmem_ind(sc, 0x0c18);
2279 eaddr[2] = (u_char)(mac_addr >> 24);
2280 eaddr[3] = (u_char)(mac_addr >> 16);
2281 eaddr[4] = (u_char)(mac_addr >> 8);
2282 eaddr[5] = (u_char)(mac_addr >> 0);
2283 } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
2284 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2285 aprint_error("%s: failed to read station address\n",
2286 sc->bge_dev.dv_xname);
2287 bge_release_resources(sc);
2288 return;
2289 }
2290
2291 /*
2292 * Save ASIC rev. Look up any quirks associated with this
2293 * ASIC.
2294 */
2295 sc->bge_chipid =
2296 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
2297 BGE_PCIMISCCTL_ASICREV;
2298 br = bge_lookup_rev(sc->bge_chipid);
2299
2300 aprint_normal("%s: ", sc->bge_dev.dv_xname);
2301
2302 if (br == NULL) {
2303 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
2304 sc->bge_quirks = 0;
2305 } else {
2306 aprint_normal("ASIC %s (0x%04x)",
2307 br->br_name, sc->bge_chipid >> 16);
2308 sc->bge_quirks |= br->br_quirks;
2309 }
2310 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2311
2312 /* Allocate the general information block and ring buffers. */
2313 if (pci_dma64_available(pa))
2314 sc->bge_dmatag = pa->pa_dmat64;
2315 else
2316 sc->bge_dmatag = pa->pa_dmat;
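	/*
	 * The usual bus_dma(9) four-step follows: allocate DMA-safe
	 * memory, map it into kernel VA, create a DMA map, and load
	 * the map so the chip can be handed physical ring addresses.
	 */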
2317 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2318 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2319 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2320 aprint_error("%s: can't alloc rx buffers\n",
2321 sc->bge_dev.dv_xname);
2322 return;
2323 }
2324 DPRINTFN(5, ("bus_dmamem_map\n"));
2325 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2326 sizeof(struct bge_ring_data), &kva,
2327 BUS_DMA_NOWAIT)) {
2328 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
2329 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
2330 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2331 return;
2332 }
2333 DPRINTFN(5, ("bus_dmamem_create\n"));
2334 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2335 sizeof(struct bge_ring_data), 0,
2336 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2337 aprint_error("%s: can't create DMA map\n",
2338 sc->bge_dev.dv_xname);
2339 bus_dmamem_unmap(sc->bge_dmatag, kva,
2340 sizeof(struct bge_ring_data));
2341 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2342 return;
2343 }
2344 DPRINTFN(5, ("bus_dmamem_load\n"));
2345 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2346 sizeof(struct bge_ring_data), NULL,
2347 BUS_DMA_NOWAIT)) {
2348 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2349 bus_dmamem_unmap(sc->bge_dmatag, kva,
2350 sizeof(struct bge_ring_data));
2351 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2352 return;
2353 }
2354
	DPRINTFN(5, ("memset\n"));
2356 sc->bge_rdata = (struct bge_ring_data *)kva;
2357
2358 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2359
2360 /* Try to allocate memory for jumbo buffers. */
2361 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2362 if (bge_alloc_jumbo_mem(sc)) {
2363 aprint_error("%s: jumbo buffer allocation failed\n",
2364 sc->bge_dev.dv_xname);
2365 } else
2366 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2367 }
2368
2369 /* Set default tuneable values. */
2370 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2371 sc->bge_rx_coal_ticks = 150;
2372 sc->bge_rx_max_coal_bds = 64;
2373 #ifdef ORIG_WPAUL_VALUES
2374 sc->bge_tx_coal_ticks = 150;
2375 sc->bge_tx_max_coal_bds = 128;
2376 #else
2377 sc->bge_tx_coal_ticks = 300;
2378 sc->bge_tx_max_coal_bds = 400;
2379 #endif
2380
2381 /* Set up ifnet structure */
2382 ifp = &sc->ethercom.ec_if;
2383 ifp->if_softc = sc;
2384 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2385 ifp->if_ioctl = bge_ioctl;
2386 ifp->if_start = bge_start;
2387 ifp->if_init = bge_init;
2388 ifp->if_watchdog = bge_watchdog;
2389 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2390 IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("strcpy\n"));
2392 strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
2393
2394 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2395 sc->ethercom.ec_if.if_capabilities |=
2396 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2397 sc->ethercom.ec_capabilities |=
2398 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2399
2400 /*
2401 * Do MII setup.
2402 */
2403 DPRINTFN(5, ("mii setup\n"));
2404 sc->bge_mii.mii_ifp = ifp;
2405 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2406 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2407 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2408
2409 /*
2410 * Figure out what sort of media we have by checking the
2411 * hardware config word in the first 32k of NIC internal memory,
2412 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2413 * cards, this value appears to be unset. If that's the
2414 * case, we have to rely on identifying the NIC by its PCI
2415 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2416 */
2417 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2418 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2419 } else {
2420 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2421 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2422 hwcfg = be32toh(hwcfg);
2423 }
2424 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2425 sc->bge_tbi = 1;
2426
2427 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2428 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
2429 SK_SUBSYSID_9D41)
2430 sc->bge_tbi = 1;
2431
2432 if (sc->bge_tbi) {
2433 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2434 bge_ifmedia_sts);
2435 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2436 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2437 0, NULL);
2438 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2439 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2440 } else {
2441 /*
2442 * Do transceiver setup.
2443 */
2444 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2445 bge_ifmedia_sts);
2446 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2447 MII_PHY_ANY, MII_OFFSET_ANY,
2448 MIIF_FORCEANEG|MIIF_DOPAUSE);
2449
2450 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2451 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2452 ifmedia_add(&sc->bge_mii.mii_media,
2453 IFM_ETHER|IFM_MANUAL, 0, NULL);
2454 ifmedia_set(&sc->bge_mii.mii_media,
2455 IFM_ETHER|IFM_MANUAL);
2456 } else
2457 ifmedia_set(&sc->bge_mii.mii_media,
2458 IFM_ETHER|IFM_AUTO);
2459 }
2460
2461 /*
2462 * When using the BCM5701 in PCI-X mode, data corruption has
2463 * been observed in the first few bytes of some received packets.
2464 * Aligning the packet buffer in memory eliminates the corruption.
2465 * Unfortunately, this misaligns the packet payloads. On platforms
2466 * which do not support unaligned accesses, we will realign the
2467 * payloads by copying the received packets.
2468 */
2469 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2470 /* If in PCI-X mode, work around the alignment bug. */
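		/*
		 * BGE_PCISTATE_PCI_BUSMODE is set for conventional PCI,
		 * so a clear BUSMODE bit with BUSSPEED set indicates a
		 * PCI-X bus.
		 */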
2471 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2472 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2473 BGE_PCISTATE_PCI_BUSSPEED)
2474 sc->bge_rx_alignment_bug = 1;
2475 }
2476
2477 /*
2478 * Call MI attach routine.
2479 */
2480 DPRINTFN(5, ("if_attach\n"));
2481 if_attach(ifp);
2482 DPRINTFN(5, ("ether_ifattach\n"));
2483 ether_ifattach(ifp, eaddr);
2484 DPRINTFN(5, ("callout_init\n"));
2485 callout_init(&sc->bge_timeout);
2486 }
2487
2488 void
2489 bge_release_resources(sc)
2490 struct bge_softc *sc;
2491 {
2492 if (sc->bge_vpd_prodname != NULL)
2493 free(sc->bge_vpd_prodname, M_DEVBUF);
2494
2495 if (sc->bge_vpd_readonly != NULL)
2496 free(sc->bge_vpd_readonly, M_DEVBUF);
2497 }
2498
2499 void
2500 bge_reset(sc)
2501 struct bge_softc *sc;
2502 {
2503 struct pci_attach_args *pa = &sc->bge_pa;
2504 u_int32_t cachesize, command, pcistate, new_pcistate;
2505 int i, val = 0;
2506
2507 /* Save some important PCI state. */
2508 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2509 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2510 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2511
2512 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2513 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2514 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2515
2516 /* Issue global reset */
2517 bge_writereg_ind(sc, BGE_MISC_CFG,
2518 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
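	/*
	 * The (65 << 1) field apparently keeps the chip's 32-bit
	 * timer on a 66MHz timebase across the reset.
	 */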
2519
2520 DELAY(1000);
2521
2522 /* Reset some of the PCI state that got zapped by reset */
2523 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2524 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2525 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2526 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2527 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2528 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2529
2530 /* Enable memory arbiter. */
2531 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2532 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2533 }
2534
2535 /*
2536 * Prevent PXE restart: write a magic number to the
2537 * general communications memory at 0xB50.
2538 */
2539 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2540
2541 /*
2542 * Poll the value location we just wrote until
2543 * we see the 1's complement of the magic number.
2544 * This indicates that the firmware initialization
2545 * is complete.
2546 */
2547 for (i = 0; i < 750; i++) {
2548 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2549 if (val == ~BGE_MAGIC_NUMBER)
2550 break;
2551 DELAY(1000);
2552 }
2553
2554 if (i == 750) {
2555 printf("%s: firmware handshake timed out, val = %x\n",
2556 sc->bge_dev.dv_xname, val);
2557 return;
2558 }
2559
2560 /*
2561 * XXX Wait for the value of the PCISTATE register to
2562 * return to its original pre-reset state. This is a
2563 * fairly good indicator of reset completion. If we don't
2564 * wait for the reset to fully complete, trying to read
2565 * from the device's non-PCI registers may yield garbage
2566 * results.
2567 */
2568 for (i = 0; i < BGE_TIMEOUT; i++) {
2569 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2570 BGE_PCI_PCISTATE);
2571 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2572 (pcistate & ~BGE_PCISTATE_RESERVED))
2573 break;
2574 DELAY(10);
2575 }
2576 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2577 (pcistate & ~BGE_PCISTATE_RESERVED)) {
2578 printf("%s: pcistate failed to revert\n",
2579 sc->bge_dev.dv_xname);
2580 }
2581
2582 /* Enable memory arbiter. */
2583 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2584 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2585 }
2586
2587 /* Fix up byte swapping */
2588 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2589
2590 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2591
2592 DELAY(10000);
2593 }
2594
2595 /*
2596 * Frame reception handling. This is called if there's a frame
2597 * on the receive return list.
2598 *
2599 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2601 * 2) the frame is from the standard receive ring
2602 */
2603
2604 void
2605 bge_rxeof(sc)
2606 struct bge_softc *sc;
2607 {
2608 struct ifnet *ifp;
2609 int stdcnt = 0, jumbocnt = 0;
2610 int have_tag = 0;
2611 u_int16_t vlan_tag = 0;
2612 bus_dmamap_t dmamap;
2613 bus_addr_t offset, toff;
2614 bus_size_t tlen;
2615 int tosync;
2616
2617 ifp = &sc->ethercom.ec_if;
2618
2619 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2620 offsetof(struct bge_ring_data, bge_status_block),
2621 sizeof (struct bge_status_block),
2622 BUS_DMASYNC_POSTREAD);
2623
2624 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2625 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2626 sc->bge_rx_saved_considx;
2627
2628 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2629
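	/*
	 * A negative count means the producer index has wrapped;
	 * sync the tail of the return ring here and the head below.
	 */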
2630 if (tosync < 0) {
2631 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2632 sizeof (struct bge_rx_bd);
2633 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2634 toff, tlen, BUS_DMASYNC_POSTREAD);
2635 tosync = -tosync;
2636 }
2637
2638 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2639 offset, tosync * sizeof (struct bge_rx_bd),
2640 BUS_DMASYNC_POSTREAD);
2641
2642 while(sc->bge_rx_saved_considx !=
2643 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2644 struct bge_rx_bd *cur_rx;
2645 u_int32_t rxidx;
2646 struct mbuf *m = NULL;
2647
2648 cur_rx = &sc->bge_rdata->
2649 bge_rx_return_ring[sc->bge_rx_saved_considx];
2650
2651 rxidx = cur_rx->bge_idx;
2652 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2653
2654 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2655 have_tag = 1;
2656 vlan_tag = cur_rx->bge_vlan_tag;
2657 }
2658
2659 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2660 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2661 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2662 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2663 jumbocnt++;
2664 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2665 ifp->if_ierrors++;
2666 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2667 continue;
2668 }
2669 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2670 NULL)== ENOBUFS) {
2671 ifp->if_ierrors++;
2672 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2673 continue;
2674 }
2675 } else {
2676 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2677 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2678 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2679 stdcnt++;
2680 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2681 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2682 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2683 ifp->if_ierrors++;
2684 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2685 continue;
2686 }
2687 if (bge_newbuf_std(sc, sc->bge_std,
2688 NULL, dmamap) == ENOBUFS) {
2689 ifp->if_ierrors++;
2690 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2691 continue;
2692 }
2693 }
2694
2695 ifp->if_ipackets++;
2696 #ifndef __NO_STRICT_ALIGNMENT
2697 /*
2698 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
2699 * the Rx buffer has the layer-2 header unaligned.
2700 * If our CPU requires alignment, re-align by copying.
2701 */
2702 if (sc->bge_rx_alignment_bug) {
2703 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
2704 cur_rx->bge_len);
2705 m->m_data += ETHER_ALIGN;
2706 }
2707 #endif
2708
2709 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2710 m->m_pkthdr.rcvif = ifp;
2711
2712 #if NBPFILTER > 0
2713 /*
2714 * Handle BPF listeners. Let the BPF user see the packet.
2715 */
2716 if (ifp->if_bpf)
2717 bpf_mtap(ifp->if_bpf, m);
2718 #endif
2719
2720 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
2721
2722 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2723 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2724 /*
2725 * Rx transport checksum-offload may also
2726 * have bugs with packets which, when transmitted,
2727 * were `runts' requiring padding.
2728 */
2729 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    (/* (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || */
2731 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
2732 m->m_pkthdr.csum_data =
2733 cur_rx->bge_tcp_udp_csum;
2734 m->m_pkthdr.csum_flags |=
2735 (M_CSUM_TCPv4|M_CSUM_UDPv4|
2736 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
2737 }
2738
2739 /*
2740 * If we received a packet with a vlan tag, pass it
2741 * to vlan_input() instead of ether_input().
2742 */
2743 if (have_tag) {
2744 struct m_tag *mtag;
2745
2746 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2747 M_NOWAIT);
2748 if (mtag != NULL) {
2749 *(u_int *)(mtag + 1) = vlan_tag;
2750 m_tag_prepend(m, mtag);
2751 have_tag = vlan_tag = 0;
2752 } else {
2753 printf("%s: no mbuf for tag\n", ifp->if_xname);
2754 m_freem(m);
2755 have_tag = vlan_tag = 0;
2756 continue;
2757 }
2758 }
2759 (*ifp->if_input)(ifp, m);
2760 }
2761
2762 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2763 if (stdcnt)
2764 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2765 if (jumbocnt)
2766 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2767 }
2768
2769 void
2770 bge_txeof(sc)
2771 struct bge_softc *sc;
2772 {
2773 struct bge_tx_bd *cur_tx = NULL;
2774 struct ifnet *ifp;
2775 struct txdmamap_pool_entry *dma;
2776 bus_addr_t offset, toff;
2777 bus_size_t tlen;
2778 int tosync;
2779 struct mbuf *m;
2780
2781 ifp = &sc->ethercom.ec_if;
2782
2783 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2784 offsetof(struct bge_ring_data, bge_status_block),
2785 sizeof (struct bge_status_block),
2786 BUS_DMASYNC_POSTREAD);
2787
2788 offset = offsetof(struct bge_ring_data, bge_tx_ring);
2789 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2790 sc->bge_tx_saved_considx;
2791
2792 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2793
2794 if (tosync < 0) {
2795 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2796 sizeof (struct bge_tx_bd);
2797 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2798 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2799 tosync = -tosync;
2800 }
2801
2802 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2803 offset, tosync * sizeof (struct bge_tx_bd),
2804 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2805
2806 /*
2807 * Go through our tx ring and free mbufs for those
2808 * frames that have been sent.
2809 */
2810 while (sc->bge_tx_saved_considx !=
2811 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2812 u_int32_t idx = 0;
2813
2814 idx = sc->bge_tx_saved_considx;
2815 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2816 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2817 ifp->if_opackets++;
2818 m = sc->bge_cdata.bge_tx_chain[idx];
2819 if (m != NULL) {
2820 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2821 dma = sc->txdma[idx];
2822 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2823 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2824 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2825 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2826 sc->txdma[idx] = NULL;
2827
2828 m_freem(m);
2829 }
2830 sc->bge_txcnt--;
2831 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2832 ifp->if_timer = 0;
2833 }
2834
2835 if (cur_tx != NULL)
2836 ifp->if_flags &= ~IFF_OACTIVE;
2837 }
2838
2839 int
2840 bge_intr(xsc)
2841 void *xsc;
2842 {
2843 struct bge_softc *sc;
2844 struct ifnet *ifp;
2845
2846 sc = xsc;
2847 ifp = &sc->ethercom.ec_if;
2848
2849 #ifdef notdef
2850 /* Avoid this for now -- checking this register is expensive. */
2851 /* Make sure this is really our interrupt. */
2852 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2853 return (0);
2854 #endif
	/* Ack interrupt and stop others from occurring. */
2856 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2857
2858 /*
2859 * Process link state changes.
2860 * Grrr. The link status word in the status block does
2861 * not work correctly on the BCM5700 rev AX and BX chips,
 * according to all available information. Hence, we have
2863 * to enable MII interrupts in order to properly obtain
2864 * async link changes. Unfortunately, this also means that
2865 * we have to read the MAC status register to detect link
2866 * changes, thereby adding an additional register access to
2867 * the interrupt handler.
2868 */
2869
2870 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2871 u_int32_t status;
2872
2873 status = CSR_READ_4(sc, BGE_MAC_STS);
2874 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2875 sc->bge_link = 0;
2876 callout_stop(&sc->bge_timeout);
2877 bge_tick(sc);
2878 /* Clear the interrupt */
2879 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2880 BGE_EVTENB_MI_INTERRUPT);
2881 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2882 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2883 BRGPHY_INTRS);
2884 }
2885 } else {
2886 if (sc->bge_rdata->bge_status_block.bge_status &
2887 BGE_STATFLAG_LINKSTATE_CHANGED) {
2888 sc->bge_link = 0;
2889 callout_stop(&sc->bge_timeout);
2890 bge_tick(sc);
2891 /* Clear the interrupt */
2892 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2893 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2894 BGE_MACSTAT_LINK_CHANGED);
2895 }
2896 }
2897
2898 if (ifp->if_flags & IFF_RUNNING) {
2899 /* Check RX return ring producer/consumer */
2900 bge_rxeof(sc);
2901
2902 /* Check TX ring producer/consumer */
2903 bge_txeof(sc);
2904 }
2905
2906 if (sc->bge_pending_rxintr_change) {
2907 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
2908 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
2909 uint32_t junk;
2910
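		/*
		 * The dummy read-back after each write presumably forces
		 * the update to post to the chip before we continue.
		 */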
2911 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
2912 DELAY(10);
2913 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
2914
2915 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
2916 DELAY(10);
2917 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
2918
2919 sc->bge_pending_rxintr_change = 0;
2920 }
2921 bge_handle_events(sc);
2922
2923 /* Re-enable interrupts. */
2924 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2925
2926 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2927 bge_start(ifp);
2928
2929 return (1);
2930 }
2931
2932 void
2933 bge_tick(xsc)
2934 void *xsc;
2935 {
2936 struct bge_softc *sc = xsc;
2937 struct mii_data *mii = &sc->bge_mii;
2938 struct ifmedia *ifm = NULL;
2939 struct ifnet *ifp = &sc->ethercom.ec_if;
2940 int s;
2941
2942 s = splnet();
2943
2944 bge_stats_update(sc);
2945 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2946 if (sc->bge_link) {
2947 splx(s);
2948 return;
2949 }
2950
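	/*
	 * For TBI (fiber), check PCS sync via the MAC status register
	 * instead of polling the MII.
	 */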
2951 if (sc->bge_tbi) {
2952 ifm = &sc->bge_ifmedia;
2953 if (CSR_READ_4(sc, BGE_MAC_STS) &
2954 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2955 sc->bge_link++;
2956 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2957 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2958 bge_start(ifp);
2959 }
2960 splx(s);
2961 return;
2962 }
2963
2964 mii_tick(mii);
2965
2966 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2967 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2968 sc->bge_link++;
2969 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2970 bge_start(ifp);
2971 }
2972
2973 splx(s);
2974 }
2975
2976 void
2977 bge_stats_update(sc)
2978 struct bge_softc *sc;
2979 {
2980 struct ifnet *ifp = &sc->ethercom.ec_if;
2981 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2982 bus_size_t rstats = BGE_RX_STATS;
2983
2984 #define READ_RSTAT(sc, stats, stat) \
2985 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
2986
2987 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
2988 ifp->if_collisions +=
2989 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
2990 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
2991 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
2992 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
2993 return;
2994 }
2995
2996 #undef READ_RSTAT
2997 #define READ_STAT(sc, stats, stat) \
2998 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2999
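	/*
	 * The chip counters are cumulative totals, so adding their sum
	 * and subtracting the previous value leaves if_collisions equal
	 * to the current hardware count.
	 */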
3000 ifp->if_collisions +=
3001 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3002 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3003 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3004 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3005 ifp->if_collisions;
3006
3007 #undef READ_STAT
3008
3009 #ifdef notdef
3010 ifp->if_collisions +=
3011 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3012 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3013 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3014 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3015 ifp->if_collisions;
3016 #endif
3017 }
3018
3019 /*
3020 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3021 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3022 * but when such padded frames employ the bge IP/TCP checksum offload,
3023 * the hardware checksum assist gives incorrect results (possibly
3024 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3025 * If we pad such runts with zeros, the onboard checksum comes out correct.
3026 */
3027 static __inline int
3028 bge_cksum_pad(struct mbuf *pkt)
3029 {
3030 struct mbuf *last = NULL;
3031 int padlen;
3032
3033 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3034
	/* If the whole packet lives in the header mbuf and we can pad there, use it. */
3036 if (pkt->m_pkthdr.len == pkt->m_len &&
3037 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
3038 last = pkt;
3039 } else {
3040 /*
3041 * Walk packet chain to find last mbuf. We will either
3042 * pad there, or append a new mbuf and pad it
3043 * (thus perhaps avoiding the bcm5700 dma-min bug).
3044 */
3045 for (last = pkt; last->m_next != NULL; last = last->m_next) {
			(void) 0; /* do nothing */
3047 }
3048
3049 /* `last' now points to last in chain. */
3050 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
3051 (void) 0; /* we can pad here, in-place. */
3052 } else {
			/* Allocate new empty mbuf, pad it. Compact later. */
			struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
			n->m_len = 0;
3057 last->m_next = n;
3058 last = n;
3059 }
3060 }
3061
3062 #ifdef DEBUG
3063 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
3064 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
3065 #endif
3066 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3067 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3068 last->m_len += padlen;
3069 pkt->m_pkthdr.len += padlen;
3070 return 0;
3071 }
3072
3073 /*
3074 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3075 */
3076 static __inline int
3077 bge_compact_dma_runt(struct mbuf *pkt)
3078 {
3079 struct mbuf *m, *prev;
3080 int totlen, prevlen;
3081
3082 prev = NULL;
3083 totlen = 0;
3084 prevlen = -1;
3085
	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;
3089
3090 totlen += mlen;
3091 if (mlen == 0) {
3092 continue;
3093 }
3094 if (mlen >= 8)
3095 continue;
3096
		/*
		 * If we get here, the mbuf data is too small for the DMA
		 * engine. Try to fix it by shuffling data to the previous
		 * or next mbuf in the chain. If that fails, do a
		 * compacting deep-copy of the whole chain.
		 */
3101
3102 /* Internal frag. If fits in prev, copy it there. */
3103 if (prev && !M_READONLY(prev) &&
3104 M_TRAILINGSPACE(prev) >= m->m_len) {
3105 bcopy(m->m_data,
3106 prev->m_data+prev->m_len,
3107 mlen);
3108 prev->m_len += mlen;
3109 m->m_len = 0;
3110 /* XXX stitch chain */
3111 prev->m_next = m_free(m);
3112 m = prev;
3113 continue;
3114 }
3115 else if (m->m_next != NULL && !M_READONLY(m) &&
3116 M_TRAILINGSPACE(m) >= shortfall &&
3117 m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and the next mbuf has enough data; pull up. */
3119
3120 bcopy(m->m_next->m_data,
3121 m->m_data+m->m_len,
3122 shortfall);
3123 m->m_len += shortfall;
3124 m->m_next->m_len -= shortfall;
3125 m->m_next->m_data += shortfall;
3126 }
		else {
			/*
			 * Any remaining runt (in practice, one at the very
			 * end of the packet): borrow data from the tail of
			 * the preceding mbuf and update its length in-place.
			 * (The original data is still valid, so we can do
			 * this even if prev is not writable.)
			 */
3133
3134 /* if we'd make prev a runt, just move all of its data. */
3135 #ifdef DEBUG
3136 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3137 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3138 #endif
3139 if ((prev->m_len - shortfall) < 8)
3140 shortfall = prev->m_len;
3141
3142 #ifdef notyet /* just do the safe slow thing for now */
3143 if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
3145 void *m_dat;
3146 m_dat = (m->m_flags & M_PKTHDR) ?
					m->m_pktdat : m->m_dat;
3148 memmove(m_dat, mtod(m, void*), m->m_len);
3149 m->m_data = m_dat;
3150 }
3151 } else
3152 #endif /* just do the safe slow thing */
3153 {
3154 struct mbuf * n = NULL;
3155 int newprevlen = prev->m_len - shortfall;
3156
3157 MGET(n, M_NOWAIT, MT_DATA);
3158 if (n == NULL)
3159 return ENOBUFS;
3160 KASSERT(m->m_len + shortfall < MLEN
3161 /*,
3162 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3163
3164 /* first copy the data we're stealing from prev */
3165 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3166
3167 /* update prev->m_len accordingly */
3168 prev->m_len -= shortfall;
3169
3170 /* copy data from runt m */
3171 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3172
3173 /* n holds what we stole from prev, plus m */
3174 n->m_len = shortfall + m->m_len;
3175
3176 /* stitch n into chain and free m */
3177 n->m_next = m->m_next;
3178 prev->m_next = n;
3179 /* KASSERT(m->m_next == NULL); */
3180 m->m_next = NULL;
3181 m_free(m);
3182 m = n; /* for continuing loop */
3183 }
3184 }
3185 prevlen = m->m_len;
3186 }
3187 return 0;
3188 }
3189
3190 /*
3191 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3192 * pointers to descriptors.
3193 */
3194 int
3195 bge_encap(sc, m_head, txidx)
3196 struct bge_softc *sc;
3197 struct mbuf *m_head;
3198 u_int32_t *txidx;
3199 {
3200 struct bge_tx_bd *f = NULL;
3201 u_int32_t frag, cur, cnt = 0;
3202 u_int16_t csum_flags = 0;
3203 struct txdmamap_pool_entry *dma;
3204 bus_dmamap_t dmamap;
3205 int i = 0;
3206 struct m_tag *mtag;
3207
3208 cur = frag = *txidx;
3209
3210 if (m_head->m_pkthdr.csum_flags) {
3211 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3212 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3213 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3214 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3215 }
3216
3217 /*
3218 * If we were asked to do an outboard checksum, and the NIC
3219 * has the bug where it sometimes adds in the Ethernet padding,
3220 * explicitly pad with zeros so the cksum will be correct either way.
3221 * (For now, do this for all chip versions, until newer
3222 * are confirmed to not require the workaround.)
3223 */
3224 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3225 #ifdef notyet
3226 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3227 #endif
3228 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3229 goto check_dma_bug;
3230
3231 if (bge_cksum_pad(m_head) != 0)
3232 return ENOBUFS;
3233
3234 check_dma_bug:
3235 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
3236 goto doit;
3237 /*
3238 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3239 * less than eight bytes. If we encounter a teeny mbuf
3240 * at the end of a chain, we can pad. Otherwise, copy.
3241 */
3242 if (bge_compact_dma_runt(m_head) != 0)
3243 return ENOBUFS;
3244
3245 doit:
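	/* Grab a preallocated TX DMA map from the free list. */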
3246 dma = SLIST_FIRST(&sc->txdma_list);
3247 if (dma == NULL)
3248 return ENOBUFS;
3249 dmamap = dma->dmamap;
3250
3251 /*
3252 * Start packing the mbufs in this chain into
3253 * the fragment pointers. Stop when we run out
3254 * of fragments or hit the end of the mbuf chain.
3255 */
3256 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3257 BUS_DMA_NOWAIT))
3258 return(ENOBUFS);
3259
3260 mtag = sc->ethercom.ec_nvlans ?
3261 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3262
3263 for (i = 0; i < dmamap->dm_nsegs; i++) {
3264 f = &sc->bge_rdata->bge_tx_ring[frag];
3265 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3266 break;
3267 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3268 f->bge_len = dmamap->dm_segs[i].ds_len;
3269 f->bge_flags = csum_flags;
3270
3271 if (mtag != NULL) {
3272 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3273 f->bge_vlan_tag = *(u_int *)(mtag + 1);
3274 } else {
3275 f->bge_vlan_tag = 0;
3276 }
3277 /*
3278 * Sanity check: avoid coming within 16 descriptors
3279 * of the end of the ring.
3280 */
3281 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
3282 return(ENOBUFS);
3283 cur = frag;
3284 BGE_INC(frag, BGE_TX_RING_CNT);
3285 cnt++;
3286 }
3287
3288 if (i < dmamap->dm_nsegs)
3289 return ENOBUFS;
3290
3291 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3292 BUS_DMASYNC_PREWRITE);
3293
3294 if (frag == sc->bge_tx_saved_considx)
3295 return(ENOBUFS);
3296
3297 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3298 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3299 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3300 sc->txdma[cur] = dma;
3301 sc->bge_txcnt += cnt;
3302
3303 *txidx = frag;
3304
3305 return(0);
3306 }
3307
3308 /*
3309 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3310 * to the mbuf data regions directly in the transmit descriptors.
3311 */
3312 void
3313 bge_start(ifp)
3314 struct ifnet *ifp;
3315 {
3316 struct bge_softc *sc;
3317 struct mbuf *m_head = NULL;
3318 u_int32_t prodidx = 0;
3319 int pkts = 0;
3320
3321 sc = ifp->if_softc;
3322
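	/*
	 * With no link established, punt unless a small backlog has
	 * already built up on the send queue.
	 */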
3323 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3324 return;
3325
3326 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3327
3328 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3329 IFQ_POLL(&ifp->if_snd, m_head);
3330 if (m_head == NULL)
3331 break;
3332
3333 #if 0
3334 /*
3335 * XXX
3336 * safety overkill. If this is a fragmented packet chain
3337 * with delayed TCP/UDP checksums, then only encapsulate
3338 * it if we have enough descriptors to handle the entire
3339 * chain at once.
3340 * (paranoia -- may not actually be needed)
3341 */
3342 if (m_head->m_flags & M_FIRSTFRAG &&
3343 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3344 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3345 m_head->m_pkthdr.csum_data + 16) {
3346 ifp->if_flags |= IFF_OACTIVE;
3347 break;
3348 }
3349 }
3350 #endif
3351
3352 /*
3353 * Pack the data into the transmit ring. If we
3354 * don't have room, set the OACTIVE flag and wait
3355 * for the NIC to drain the ring.
3356 */
3357 if (bge_encap(sc, m_head, &prodidx)) {
3358 ifp->if_flags |= IFF_OACTIVE;
3359 break;
3360 }
3361
3362 /* now we are committed to transmit the packet */
3363 IFQ_DEQUEUE(&ifp->if_snd, m_head);
3364 pkts++;
3365
3366 #if NBPFILTER > 0
3367 /*
3368 * If there's a BPF listener, bounce a copy of this frame
3369 * to him.
3370 */
3371 if (ifp->if_bpf)
3372 bpf_mtap(ifp->if_bpf, m_head);
3373 #endif
3374 }
3375 if (pkts == 0)
3376 return;
3377
3378 /* Transmit */
3379 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3380 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
3381 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3382
3383 /*
3384 * Set a timeout in case the chip goes out to lunch.
3385 */
3386 ifp->if_timer = 5;
3387 }
3388
3389 int
3390 bge_init(ifp)
3391 struct ifnet *ifp;
3392 {
3393 struct bge_softc *sc = ifp->if_softc;
3394 u_int16_t *m;
3395 int s, error;
3396
3397 s = splnet();
3398
3399 ifp = &sc->ethercom.ec_if;
3400
3401 /* Cancel pending I/O and flush buffers. */
3402 bge_stop(sc);
3403 bge_reset(sc);
3404 bge_chipinit(sc);
3405
3406 /*
3407 * Init the various state machines, ring
3408 * control blocks and firmware.
3409 */
3410 error = bge_blockinit(sc);
3411 if (error != 0) {
3412 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
3413 error);
3414 splx(s);
3415 return error;
3416 }
3417
3418 ifp = &sc->ethercom.ec_if;
3419
3420 /* Specify MTU. */
3421 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3422 ETHER_HDR_LEN + ETHER_CRC_LEN);
3423
3424 /* Load our MAC address. */
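	/* Each 16-bit half is byte-swapped to big-endian for the chip. */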
3425 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
3426 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3427 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3428
3429 /* Enable or disable promiscuous mode as needed. */
3430 if (ifp->if_flags & IFF_PROMISC) {
3431 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3432 } else {
3433 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3434 }
3435
3436 /* Program multicast filter. */
3437 bge_setmulti(sc);
3438
3439 /* Init RX ring. */
3440 bge_init_rx_ring_std(sc);
3441
3442 /* Init jumbo RX ring. */
3443 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3444 bge_init_rx_ring_jumbo(sc);
3445
3446 /* Init our RX return ring index */
3447 sc->bge_rx_saved_considx = 0;
3448
3449 /* Init TX ring. */
3450 bge_init_tx_ring(sc);
3451
3452 /* Turn on transmitter */
3453 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3454
3455 /* Turn on receiver */
3456 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3457
3458 /* Tell firmware we're alive. */
3459 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3460
3461 /* Enable host interrupts. */
3462 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3463 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3464 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3465
3466 bge_ifmedia_upd(ifp);
3467
3468 ifp->if_flags |= IFF_RUNNING;
3469 ifp->if_flags &= ~IFF_OACTIVE;
3470
3471 splx(s);
3472
3473 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3474
3475 return 0;
3476 }
3477
3478 /*
3479 * Set media options.
3480 */
3481 int
3482 bge_ifmedia_upd(ifp)
3483 struct ifnet *ifp;
3484 {
3485 struct bge_softc *sc = ifp->if_softc;
3486 struct mii_data *mii = &sc->bge_mii;
3487 struct ifmedia *ifm = &sc->bge_ifmedia;
3488
3489 /* If this is a 1000baseX NIC, enable the TBI port. */
3490 if (sc->bge_tbi) {
3491 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3492 return(EINVAL);
3493 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3494 case IFM_AUTO:
3495 break;
3496 case IFM_1000_SX:
3497 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3498 BGE_CLRBIT(sc, BGE_MAC_MODE,
3499 BGE_MACMODE_HALF_DUPLEX);
3500 } else {
3501 BGE_SETBIT(sc, BGE_MAC_MODE,
3502 BGE_MACMODE_HALF_DUPLEX);
3503 }
3504 break;
3505 default:
3506 return(EINVAL);
3507 }
3508 /* XXX 802.3x flow control for 1000BASE-SX */
3509 return(0);
3510 }
3511
3512 sc->bge_link = 0;
3513 mii_mediachg(mii);
3514
3515 return(0);
3516 }
3517
3518 /*
3519 * Report current media status.
3520 */
3521 void
3522 bge_ifmedia_sts(ifp, ifmr)
3523 struct ifnet *ifp;
3524 struct ifmediareq *ifmr;
3525 {
3526 struct bge_softc *sc = ifp->if_softc;
3527 struct mii_data *mii = &sc->bge_mii;
3528
3529 if (sc->bge_tbi) {
3530 ifmr->ifm_status = IFM_AVALID;
3531 ifmr->ifm_active = IFM_ETHER;
3532 if (CSR_READ_4(sc, BGE_MAC_STS) &
3533 BGE_MACSTAT_TBI_PCS_SYNCHED)
3534 ifmr->ifm_status |= IFM_ACTIVE;
3535 ifmr->ifm_active |= IFM_1000_SX;
3536 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3537 ifmr->ifm_active |= IFM_HDX;
3538 else
3539 ifmr->ifm_active |= IFM_FDX;
3540 return;
3541 }
3542
3543 mii_pollstat(mii);
3544 ifmr->ifm_status = mii->mii_media_status;
3545 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3546 sc->bge_flowflags;
3547 }
3548
3549 int
3550 bge_ioctl(ifp, command, data)
3551 struct ifnet *ifp;
3552 u_long command;
3553 caddr_t data;
3554 {
3555 struct bge_softc *sc = ifp->if_softc;
3556 struct ifreq *ifr = (struct ifreq *) data;
3557 int s, error = 0;
3558 struct mii_data *mii;
3559
3560 s = splnet();
3561
3562 switch(command) {
3563 case SIOCSIFFLAGS:
3564 if (ifp->if_flags & IFF_UP) {
3565 /*
3566 * If only the state of the PROMISC flag changed,
3567 * then just use the 'set promisc mode' command
3568 * instead of reinitializing the entire NIC. Doing
3569 * a full re-init means reloading the firmware and
3570 * waiting for it to start up, which may take a
3571 * second or two.
3572 */
3573 if (ifp->if_flags & IFF_RUNNING &&
3574 ifp->if_flags & IFF_PROMISC &&
3575 !(sc->bge_if_flags & IFF_PROMISC)) {
3576 BGE_SETBIT(sc, BGE_RX_MODE,
3577 BGE_RXMODE_RX_PROMISC);
3578 } else if (ifp->if_flags & IFF_RUNNING &&
3579 !(ifp->if_flags & IFF_PROMISC) &&
3580 sc->bge_if_flags & IFF_PROMISC) {
3581 BGE_CLRBIT(sc, BGE_RX_MODE,
3582 BGE_RXMODE_RX_PROMISC);
3583 } else
3584 bge_init(ifp);
3585 } else {
3586 if (ifp->if_flags & IFF_RUNNING) {
3587 bge_stop(sc);
3588 }
3589 }
3590 sc->bge_if_flags = ifp->if_flags;
3591 error = 0;
3592 break;
3593 case SIOCSIFMEDIA:
3594 /* XXX Flow control is not supported for 1000BASE-SX */
3595 if (sc->bge_tbi) {
3596 ifr->ifr_media &= ~IFM_ETH_FMASK;
3597 sc->bge_flowflags = 0;
3598 }
3599
3600 /* Flow control requires full-duplex mode. */
3601 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3602 (ifr->ifr_media & IFM_FDX) == 0) {
3603 ifr->ifr_media &= ~IFM_ETH_FMASK;
3604 }
3605 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3606 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
3608 ifr->ifr_media |=
3609 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3610 }
3611 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3612 }
3613 /* FALLTHROUGH */
3614 case SIOCGIFMEDIA:
3615 if (sc->bge_tbi) {
3616 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3617 command);
3618 } else {
3619 mii = &sc->bge_mii;
3620 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3621 command);
3622 }
3623 break;
3624 default:
3625 error = ether_ioctl(ifp, command, data);
3626 if (error == ENETRESET) {
3627 bge_setmulti(sc);
3628 error = 0;
3629 }
3630 break;
3631 }
3632
3633 splx(s);
3634
3635 return(error);
3636 }
3637
3638 void
3639 bge_watchdog(ifp)
3640 struct ifnet *ifp;
3641 {
3642 struct bge_softc *sc;
3643
3644 sc = ifp->if_softc;
3645
3646 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3647
3648 ifp->if_flags &= ~IFF_RUNNING;
3649 bge_init(ifp);
3650
3651 ifp->if_oerrors++;
3652 }
3653
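/*
 * Clear the enable bit for a state-machine block and spin until
 * the hardware acknowledges that the block has stopped.
 */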
3654 static void
3655 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
3656 {
3657 int i;
3658
3659 BGE_CLRBIT(sc, reg, bit);
3660
3661 for (i = 0; i < BGE_TIMEOUT; i++) {
3662 if ((CSR_READ_4(sc, reg) & bit) == 0)
3663 return;
3664 delay(100);
3665 }
3666
3667 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3668 sc->bge_dev.dv_xname, (u_long) reg, bit);
3669 }
3670
3671 /*
3672 * Stop the adapter and free any mbufs allocated to the
3673 * RX and TX lists.
3674 */
3675 void
3676 bge_stop(sc)
3677 struct bge_softc *sc;
3678 {
3679 struct ifnet *ifp = &sc->ethercom.ec_if;
3680
3681 callout_stop(&sc->bge_timeout);
3682
3683 /*
3684 * Disable all of the receiver blocks
3685 */
3686 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3687 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3688 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3689 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3690 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3691 }
3692 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3693 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3694 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3695
3696 /*
3697 * Disable all of the transmit blocks
3698 */
3699 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3700 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3701 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3702 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3703 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3704 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3705 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3706 }
3707 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3708
3709 /*
3710 * Shut down all of the memory managers and related
3711 * state machines.
3712 */
3713 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3714 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3715 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3716 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3717 }
3718
3719 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3720 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3721
3722 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3723 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3724 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3725 }
3726
3727 /* Disable host interrupts. */
3728 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3729 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3730
3731 /*
3732 * Tell firmware we're shutting down.
3733 */
3734 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3735
3736 /* Free the RX lists. */
3737 bge_free_rx_ring_std(sc);
3738
3739 /* Free jumbo RX list. */
3740 bge_free_rx_ring_jumbo(sc);
3741
3742 /* Free TX buffers. */
3743 bge_free_tx_ring(sc);
3744
3745 /*
3746 * Isolate/power down the PHY.
3747 */
3748 if (!sc->bge_tbi)
3749 mii_down(&sc->bge_mii);
3750
3751 sc->bge_link = 0;
3752
3753 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3754
3755 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3756 }
3757
3758 /*
3759 * Stop all chip I/O so that the kernel's probe routines don't
3760 * get confused by errant DMAs when rebooting.
3761 */
3762 void
3763 bge_shutdown(xsc)
3764 void *xsc;
3765 {
3766 struct bge_softc *sc = (struct bge_softc *)xsc;
3767
3768 bge_stop(sc);
3769 bge_reset(sc);
3770 }
3771
3772
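/*
 * Validate a hw.bge sysctl write: accept only in-range Rx
 * interrupt-mitigation levels, then push the new threshold out to
 * all bge instances.
 */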
3773 static int
3774 sysctl_bge_verify(SYSCTLFN_ARGS)
3775 {
3776 int error, t;
3777 struct sysctlnode node;
3778
3779 node = *rnode;
3780 t = *(int*)rnode->sysctl_data;
3781 node.sysctl_data = &t;
3782 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3783 if (error || newp == NULL)
3784 return (error);
3785
3786 #if 0
3787 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
3788 node.sysctl_num, rnode->sysctl_num));
3789 #endif
3790
3791 if (node.sysctl_num == bge_rxthresh_nodenum) {
3792 if (t < 0 || t >= NBGE_RX_THRESH)
3793 return (EINVAL);
3794 bge_update_all_threshes(t);
3795 } else
3796 return (EINVAL);
3797
3798 *(int*)rnode->sysctl_data = t;
3799
3800 return (0);
3801 }
3802
3803 /*
3804 * Set up sysctl(3) MIB, hw.bge.*.
3805 *
3806 * TBD condition SYSCTL_PERMANENT on being an LKM or not
3807 */
3808 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
3809 {
3810 int rc, bge_root_num;
3811 struct sysctlnode *node;
3812
3813 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
3814 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
3815 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
3816 goto err;
3817 }
3818
3819 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3820 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", NULL,
3821 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
3822 goto err;
3823 }
3824
3825 bge_root_num = node->sysctl_num;
3826
3827 /* BGE Rx interrupt mitigation level */
3828 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3829 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
3830 CTLTYPE_INT, "rx_lvl", NULL, sysctl_bge_verify, 0,
3831 &bge_rx_thresh_lvl,
3832 0, CTL_HW, bge_root_num, CTL_CREATE,
3833 CTL_EOL)) != 0) {
3834 goto err;
3835 }
3836
3837 bge_rxthresh_nodenum = node->sysctl_num;
3838
3839 return;
3840
3841 err:
3842 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
3843 }
3844