/*	$NetBSD: if_bge.c,v 1.124 2007/02/17 19:47:06 bouyer Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
 *
 * NetBSD version by:
 *
 *	Frank van der Linden <fvdl@wasabisystems.com>
 *	Jason Thorpe <thorpej@wasabisystems.com>
 *	Jonathan Stone <jonathan@dsg.stanford.edu>
 *
 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.124 2007/02/17 19:47:06 bouyer Exp $");

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

/* Headers for TCP Segmentation Offload (TSO) */
#include <netinet/in_systm.h>	/* n_time for <netinet/ip.h>... */
#include <netinet/in.h>		/* ip_{src,dst}, for <netinet/ip.h> */
#include <netinet/ip.h>		/* for struct ip */
#include <netinet/tcp.h>	/* for struct tcphdr */


#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

#define ETHER_MIN_NOPAD	(ETHER_MIN_LEN - ETHER_CRC_LEN)	/* i.e., 60 */


/*
 * Tunable thresholds for rx-side bge interrupt mitigation.
 */

/*
 * The pairs of values below were obtained from empirical measurement
 * on bcm5700 rev B2; they are designed to give roughly 1 receive
 * interrupt for every N packets received, where N is, approximately,
 * the second value (rx_max_bds) in each pair.  The values are chosen
 * such that moving from one pair to the succeeding pair was observed
 * to roughly halve interrupt rate under sustained input packet load.
 * The values were empirically chosen to avoid overflowing internal
 * limits on the bcm5700: increasing rx_ticks much beyond 600
 * results in internal wrapping and higher interrupt rates.
 * The limit of 46 frames was chosen to match NFS workloads.
 *
 * These values also work well on bcm5701, bcm5704C, and (less
 * tested) bcm5703.  On other chipsets, (including the Altima chip
 * family), the larger values may overflow internal chip limits,
 * leading to increasing interrupt rates rather than lower interrupt
 * rates.
 *
 * Applications using heavy interrupt mitigation (interrupting every
 * 32 or 46 frames) in both directions may need to increase the TCP
 * windowsize to above 131072 bytes (e.g., to 199608 bytes) to sustain
 * full link bandwidth, due to ACKs and window updates lingering
 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
 */
static const struct bge_load_rx_thresh {
	int rx_ticks;
	int rx_max_bds;
} bge_rx_threshes[] = {
	{ 32,   2 },
	{ 50,   4 },
	{ 100,  8 },
	{ 192, 16 },
	{ 416, 32 },
	{ 598, 46 }
};
#define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))

/* XXX patchable; should be sysctl'able */
static int bge_auto_thresh = 1;
static int bge_rx_thresh_lvl;

static int bge_rxthresh_nodenum;

static int bge_probe(device_t, cfdata_t, void *);
static void bge_attach(device_t, device_t, void *);
static void bge_powerhook(int, void *);
static void bge_release_resources(struct bge_softc *);
static void bge_txeof(struct bge_softc *);
static void bge_rxeof(struct bge_softc *);

static void bge_tick(void *);
static void bge_stats_update(struct bge_softc *);
static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);

static int bge_intr(void *);
static void bge_start(struct ifnet *);
static int bge_ioctl(struct ifnet *, u_long, caddr_t);
static int bge_init(struct ifnet *);
static void bge_stop(struct bge_softc *);
static void bge_watchdog(struct ifnet *);
static void bge_shutdown(void *);
static int bge_ifmedia_upd(struct ifnet *);
static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void bge_setmulti(struct bge_softc *);

static void bge_handle_events(struct bge_softc *);
static int bge_alloc_jumbo_mem(struct bge_softc *);
#if 0 /* XXX */
static void bge_free_jumbo_mem(struct bge_softc *);
#endif
static void *bge_jalloc(struct bge_softc *);
static void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
    bus_dmamap_t);
static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
static int bge_init_rx_ring_std(struct bge_softc *);
static void bge_free_rx_ring_std(struct bge_softc *);
static int bge_init_rx_ring_jumbo(struct bge_softc *);
static void bge_free_rx_ring_jumbo(struct bge_softc *);
static void bge_free_tx_ring(struct bge_softc *);
static int bge_init_tx_ring(struct bge_softc *);

static int bge_chipinit(struct bge_softc *);
static int bge_blockinit(struct bge_softc *);
static int bge_setpowerstate(struct bge_softc *, int);

static void bge_reset(struct bge_softc *);

#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
#define BGE_TSO_PRINTF(x)  do { if (bge_tso_debug) printf x; } while (0)
int	bgedebug = 0;
int	bge_tso_debug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#define BGE_TSO_PRINTF(x)
#endif

#ifdef BGE_EVENT_COUNTERS
#define	BGE_EVCNT_INCR(ev)	(ev).ev_count++
#define	BGE_EVCNT_ADD(ev, val)	(ev).ev_count += (val)
#define	BGE_EVCNT_UPD(ev, val)	(ev).ev_count = (val)
#else
#define	BGE_EVCNT_INCR(ev)	/* nothing */
#define	BGE_EVCNT_ADD(ev, val)	/* nothing */
#define	BGE_EVCNT_UPD(ev, val)	/* nothing */
#endif

/* Various chip quirks. */
#define	BGE_QUIRK_LINK_STATE_BROKEN	0x00000001
#define	BGE_QUIRK_CSUM_BROKEN		0x00000002
#define	BGE_QUIRK_ONLY_PHY_1		0x00000004
#define	BGE_QUIRK_5700_SMALLDMA		0x00000008
#define	BGE_QUIRK_5700_PCIX_REG_BUG	0x00000010
#define	BGE_QUIRK_PRODUCER_BUG		0x00000020
#define	BGE_QUIRK_PCIX_DMA_ALIGN_BUG	0x00000040
#define	BGE_QUIRK_5705_CORE		0x00000080
#define	BGE_QUIRK_FEWER_MBUFS		0x00000100

/*
 * XXX: how to handle variants based on 5750 and derivatives:
 * 5750, 5751, 5721, possibly 5714, 5752, and 5708?, which
 * in general behave like a 5705, except with additional quirks.
 * This driver's current handling of the 5721 is wrong;
 * how we map ASIC revision to "quirks" needs more thought.
 * (defined here until the thought is done).
 */
#define BGE_IS_5714_FAMILY(sc) \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714)

#define BGE_IS_5750_OR_BEYOND(sc) \
	(BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \
	 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \
	 BGE_IS_5714_FAMILY(sc))

#define BGE_IS_5705_OR_BEYOND(sc) \
	(((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \
	 BGE_IS_5750_OR_BEYOND(sc))


/* following bugs are common to bcm5700 rev B, all flavours */
#define BGE_QUIRK_5700_COMMON \
	(BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)

CFATTACH_DECL(bge, sizeof(struct bge_softc),
    bge_probe, bge_attach, NULL, NULL);

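/*
 * The chip's internal memory and registers can be reached indirectly:
 * a base-address register in PCI configuration space selects a window,
 * and a companion data register reads or writes through it.
 */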
static u_int32_t
bge_readmem_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	pcireg_t val;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	return val;
}

static void
bge_writemem_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}

#ifdef notdef
static u_int32_t
bge_readreg_ind(struct bge_softc *sc, int off)
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

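/* As above, but through the register window (BGE_PCI_REG_BASEADDR/DATA). */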
static void
bge_writereg_ind(struct bge_softc *sc, int off, int val)
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

#ifdef notdef
static u_int8_t
bge_vpd_readbyte(struct bge_softc *sc, int addr)
{
	int i;
	u_int32_t val;
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

static void
bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr)
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

static void
bge_vpd_read(struct bge_softc *sc)
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static u_int8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t val;
	u_int32_t saved_autopoll;
	int i;

	/*
	 * Several chips with builtin PHYs will incorrectly answer to
	 * other PHY instances than the builtin PHY at id 1.
	 */
	if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
		return(0);

	/* Reading with autopolling on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

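	/* Kick off the read and poll BGE_MI_COMM until the BUSY bit clears. */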
	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		val = 0;
		goto done;
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}

static void
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	u_int32_t saved_autopoll;
	int i;

	/* Touching the PHY while autopolling is on may trigger PCI errors */
	saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		delay(40);
		CSR_WRITE_4(sc, BGE_MI_MODE,
		    saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
		delay(10); /* 40 usec is supposed to be adequate */
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
		CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
		delay(40);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
		sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	/*
	 * 802.3x flow control
	 */
	if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
	}
	if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
		BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	} else {
		BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
	}
}

/*
 * Update rx threshold levels to values in a particular slot
 * of the interrupt-mitigation table bge_rx_threshes.
 */
static void
bge_set_thresh(struct ifnet *ifp, int lvl)
{
	struct bge_softc *sc = ifp->if_softc;
	int s;

	/* For now, just save the new Rx-intr thresholds and record
	 * that a threshold update is pending.  Updating the hardware
	 * registers here (even at splhigh()) is observed to
	 * occasionally cause glitches where Rx-interrupts are not
	 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
	 */
	s = splnet();
	sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
	sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
	sc->bge_pending_rxintr_change = 1;
	splx(s);

	return;
}


/*
 * Update Rx thresholds of all bge devices
 */
static void
bge_update_all_threshes(int lvl)
{
	struct ifnet *ifp;
	const char * const namebuf = "bge";
	int namelen;

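	/* Clamp the requested level to the bounds of the threshold table. */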
	if (lvl < 0)
		lvl = 0;
	else if (lvl >= NBGE_RX_THRESH)
		lvl = NBGE_RX_THRESH - 1;

	namelen = strlen(namebuf);
	/*
	 * Now search all the interfaces for this name/number
	 */
	IFNET_FOREACH(ifp) {
		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
			continue;
		/* We got a match: update if doing auto-threshold-tuning */
		if (bge_auto_thresh)
			bge_set_thresh(ifp, lvl);
	}
}

/*
 * Handle events that have triggered interrupts.
 */
static void
bge_handle_events(struct bge_softc *sc)
{
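	/* Currently a no-op; kept as a placeholder. */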

	return;
}

/*
 * Memory management for jumbo frames.
 */

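/*
 * Jumbo receive buffers come from a single large DMA area:
 * bge_alloc_jumbo_mem() maps BGE_JMEM bytes and carves it into
 * BGE_JSLOTS buffers of BGE_JLEN bytes each, tracked on the
 * jfree/jinuse lists.
 */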
static int
bge_alloc_jumbo_mem(struct bge_softc *sc)
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map DMA buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
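		/* Unwind in reverse order of setup; the cases fall through. */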
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
bge_jalloc(struct bge_softc *sc)
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
bge_jfree(struct mbuf *m, caddr_t buf, size_t size, void *arg)
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((caddr_t)buf
	    - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}


/*
 * Initialize a standard receive ring descriptor.
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

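	/* Cache the map per slot so bge_free_rx_ring_std() can destroy it. */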
	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
	    BUS_DMA_READ|BUS_DMA_NOWAIT))
		return(ENOBUFS);
	bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, MCLBYTES,
	    BUS_DMASYNC_PREREAD);
	/*
	 * As m_adj() changes the length of the chain, it's easier to
	 * dmamap_load it before adjusting.  Adjust only once here, so
	 * recycled mbufs are not shifted twice.
	 */
	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	bge_set_hostaddr(&r->bge_addr,
	    dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	caddr_t buf = NULL;

	if (m == NULL) {

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
		m_new->m_flags |= M_EXT_RW;
	} else {
		m_new = m;
		buf = m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}
	bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    buf - sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
	    BUS_DMASYNC_PREREAD);

	if (!sc->bge_rx_alignment_bug)
		m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return(0);
}

static void
bge_free_rx_ring_std(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;
	volatile struct bge_rcb *rcb;

	if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;
	sc->bge_flags |= BGE_JUMBO_RXRING_VALID;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcb->bge_maxlen_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

static void
bge_free_rx_ring_jumbo(struct bge_softc *sc)
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

static void
bge_free_tx_ring(struct bge_softc *sc)
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

static int
bge_init_tx_ring(struct bge_softc *sc)
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;

	/* Initialize transmit producer index for host-memory send ring. */
	sc->bge_tx_prodidx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);

	/* NIC-memory send ring not used; initialize to zero. */
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
	if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG)	/* 5700 b2 errata */
		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
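	/* Pre-allocate a pool of TX DMA maps (BGE_RSLOTS of them). */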
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return(ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return(0);
}

static void
bge_setmulti(struct bge_softc *sc)
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 7 least-significant bits. */
		h &= 0x7f;

		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;

 setit:
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

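/*
 * All combinations of the mode-control byte/word swap bits, indexed
 * by bge_swapindex below; apparently kept for experimenting with
 * endianness settings.
 */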
const int bge_swapbits[] = {
	0,
	BGE_MODECTL_BYTESWAP_DATA,
	BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
};

int bge_swapindex = 0;

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
bge_chipinit(struct bge_softc *sc)
{
	u_int32_t cachesize;
	int i;
	u_int32_t dma_rw_ctl;
	struct pci_attach_args *pa = &(sc->bge_pa);

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/* Set power state to D0. */
	bge_setpowerstate(sc, 0);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	if (sc->bge_pcie) {
		u_int32_t device_ctl;

		/* From FreeBSD */
		DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));

		/* jonathan: alternative from Linux driver */
#define DMA_CTRL_WRITE_PCIE_H20MARK_128		0x00180000
#define DMA_CTRL_WRITE_PCIE_H20MARK_256		0x00380000

		dma_rw_ctl = 0x76000000; /* XXX XXX XXX */
		device_ctl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    BGE_PCI_CONF_DEV_CTRL);
		aprint_debug("%s: pcie mode=0x%x\n", sc->bge_dev.dv_xname,
		    device_ctl);

		if ((device_ctl & 0x00e0) && 0) {
			/*
			 * XXX jonathan@NetBSD.org:
			 * This clause is exactly what the Broadcom-supplied
			 * Linux driver does; but given overall register
			 * programming by if_bge(4), this larger DMA-write
			 * watermark value causes bcm5721 chips to totally
			 * wedge.
			 */
			dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256;
		} else {
			dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128;
		}
	} else if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
	    BGE_PCISTATE_PCI_BUSMODE) {
		/* Conventional PCI bus */
		DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
		    sc->bge_dev.dv_xname));
		dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
		    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
		if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
			dma_rw_ctl |= 0x0F;
		}
	} else {
		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
		    sc->bge_dev.dv_xname));
		/* PCI-X bus */
		dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
		    (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
		    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
		    (0x0F);
		/*
		 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
		 * for hardware bugs, which means we should also clear
		 * the low-order MINDMA bits.  In addition, the 5704
		 * uses a different encoding of read/write watermarks.
		 */
		if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
			    /* should be 0x1f0000 */
			    (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
			    (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
		else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
			dma_rw_ctl &= 0xfffffff0;
			dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
		}
		else if (BGE_IS_5714_FAMILY(sc)) {
			dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
			dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
			/* XXX magic values, Broadcom-supplied Linux driver */
			if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
				dma_rw_ctl |= (1 << 20) | (1 << 18) |
				    BGE_PCIDMARWCTL_ONEDMA_ATONCE;
			else
				dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15);
		}
	}

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);

	/* Get cache line size. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);

	/*
	 * Avoid violating PCI spec on certain chip revs.
	 */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
	    PCIM_CMD_MWIEN) {
		switch(cachesize) {
		case 1:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_16BYTES);
			break;
		case 2:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_32BYTES);
			break;
		case 4:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_64BYTES);
			break;
		case 8:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_128BYTES);
			break;
		case 16:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_256BYTES);
			break;
		case 32:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_512BYTES);
			break;
		case 64:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_1024BYTES);
			break;
		default:
			/* Disable PCI memory write and invalidate. */
#if 0
			if (bootverbose)
				printf("%s: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->bge_dev.dv_xname, cachesize);
#endif
			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
			    PCIM_CMD_MWIEN);
			break;
		}
	}

	/*
	 * Disable memory write invalidate.  Apparently it is not supported
	 * properly by these devices.
	 */
	PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);


#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1 /* BGE_32BITTIME_66MHZ */);

	return(0);
}

static int
bge_blockinit(struct bge_softc *sc)
{
	volatile struct bge_rcb *rcb;
	bus_size_t rcb_addr;
	int i;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */

	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
	    BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		if (sc->bge_extram) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_EXT_SSRAM);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		} else {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
			    BGE_BUFFPOOL_1);
			if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
			else
				CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
		}

		/* Configure DMA resource pool */
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
		    BGE_DMA_DESCRIPTORS);
		CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
	}

	/* Configure mbuf pool watermarks */
#ifdef ORIG_WPAUL_VALUES
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
#else
	/* new broadcom docs strongly recommend these: */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		if (ifp->if_mtu > ETHER_MAX_LEN) {
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
		} else {
			/* Values from Linux driver... */
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
		}
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
	}
#endif

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: buffer manager failed to start\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
	} else {
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
	}
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
	} else {
		sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
	}

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
		bge_set_hostaddr(&rcb->bge_hostaddr,
		    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
			BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_extram)
			rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;

		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);

		/* Set up dummy disabled mini ring RCB */
		rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);

		bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
		    offsetof(struct bge_ring_data, bge_info),
		    sizeof (struct bge_gib),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	/*
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
	}

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
		    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
			BGE_RCB_FLAG_RING_DISABLED));
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
	    BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
		CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	}
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);

	/* Set up address of statistics block */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		bge_set_hostaddr(&taddr,
		    BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
		CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
		CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
	}

	/* Set up address of status block */
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
		CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
	}

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

#if defined(not_quite_yet)
1767 	/* The Linux driver enables GPIO pin #1 on 5700s */
1768 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
1769 sc->bge_local_ctrl_reg |=
1770 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
1771 }
1772 #endif
1773 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1774
1775 /* Turn on DMA completion state machine */
1776 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1777 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1778 }
1779
1780 /* Turn on write DMA state machine */
1781 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1782 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1783
1784 /* Turn on read DMA state machine */
1785 {
1786 uint32_t dma_read_modebits;
1787
1788 dma_read_modebits =
1789 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1790
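		/*
		 * Note the "&& 0" below: the PCIe long-burst setting is
		 * deliberately compiled out for now.
		 */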
1791 if (sc->bge_pcie && 0) {
1792 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
1793 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) {
1794 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128;
1795 }
1796
1797 /* XXX broadcom-supplied linux driver; undocumented */
1798 if (BGE_IS_5750_OR_BEYOND(sc)) {
1799 /*
1800 * XXX: magic values.
1801 * From Broadcom-supplied Linux driver; apparently
1802 * required to workaround a DMA bug affecting TSO
1803 			 * required to work around a DMA bug affecting TSO
1804 */
1805 dma_read_modebits |= (1 << 27);
1806 }
1807 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
1808 }
1809
1810 /* Turn on RX data completion state machine */
1811 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1812
1813 /* Turn on RX BD initiator state machine */
1814 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1815
1816 /* Turn on RX data and RX BD initiator state machine */
1817 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1818
1819 /* Turn on Mbuf cluster free state machine */
1820 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1821 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1822 }
1823
1824 /* Turn on send BD completion state machine */
1825 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1826
1827 /* Turn on send data completion state machine */
1828 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1829
1830 /* Turn on send data initiator state machine */
1831 if (BGE_IS_5750_OR_BEYOND(sc)) {
1832 /* XXX: magic value from Linux driver */
1833 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1834 } else {
1835 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1836 }
1837
1838 /* Turn on send BD initiator state machine */
1839 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1840
1841 /* Turn on send BD selector state machine */
1842 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1843
1844 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1845 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1846 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1847
1848 /* ack/clear link change events */
1849 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1850 BGE_MACSTAT_CFG_CHANGED);
1851 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1852
1853 /* Enable PHY auto polling (for MII/GMII only) */
1854 if (sc->bge_tbi) {
1855 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1856 } else {
1857 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1858 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1859 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1860 BGE_EVTENB_MI_INTERRUPT);
1861 }
1862
1863 /* Enable link state change attentions. */
1864 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1865
1866 return(0);
1867 }
1868
1869 static const struct bge_revision {
1870 uint32_t br_chipid;
1871 uint32_t br_quirks;
1872 const char *br_name;
1873 } bge_revisions[] = {
1874 { BGE_CHIPID_BCM5700_A0,
1875 BGE_QUIRK_LINK_STATE_BROKEN,
1876 "BCM5700 A0" },
1877
1878 { BGE_CHIPID_BCM5700_A1,
1879 BGE_QUIRK_LINK_STATE_BROKEN,
1880 "BCM5700 A1" },
1881
1882 { BGE_CHIPID_BCM5700_B0,
1883 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
1884 "BCM5700 B0" },
1885
1886 { BGE_CHIPID_BCM5700_B1,
1887 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1888 "BCM5700 B1" },
1889
1890 { BGE_CHIPID_BCM5700_B2,
1891 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1892 "BCM5700 B2" },
1893
1894 { BGE_CHIPID_BCM5700_B3,
1895 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1896 "BCM5700 B3" },
1897
1898 /* This is treated like a BCM5700 Bx */
1899 { BGE_CHIPID_BCM5700_ALTIMA,
1900 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1901 "BCM5700 Altima" },
1902
1903 { BGE_CHIPID_BCM5700_C0,
1904 0,
1905 "BCM5700 C0" },
1906
1907 { BGE_CHIPID_BCM5701_A0,
1908 0, /*XXX really, just not known */
1909 "BCM5701 A0" },
1910
1911 { BGE_CHIPID_BCM5701_B0,
1912 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1913 "BCM5701 B0" },
1914
1915 { BGE_CHIPID_BCM5701_B2,
1916 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1917 "BCM5701 B2" },
1918
1919 { BGE_CHIPID_BCM5701_B5,
1920 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1921 "BCM5701 B5" },
1922
1923 { BGE_CHIPID_BCM5703_A0,
1924 0,
1925 "BCM5703 A0" },
1926
1927 { BGE_CHIPID_BCM5703_A1,
1928 0,
1929 "BCM5703 A1" },
1930
1931 { BGE_CHIPID_BCM5703_A2,
1932 BGE_QUIRK_ONLY_PHY_1,
1933 "BCM5703 A2" },
1934
1935 { BGE_CHIPID_BCM5703_A3,
1936 BGE_QUIRK_ONLY_PHY_1,
1937 "BCM5703 A3" },
1938
1939 { BGE_CHIPID_BCM5703_B0,
1940 BGE_QUIRK_ONLY_PHY_1,
1941 "BCM5703 B0" },
1942
1943 { BGE_CHIPID_BCM5704_A0,
1944 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1945 "BCM5704 A0" },
1946
1947 { BGE_CHIPID_BCM5704_A1,
1948 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1949 "BCM5704 A1" },
1950
1951 { BGE_CHIPID_BCM5704_A2,
1952 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1953 "BCM5704 A2" },
1954
1955 { BGE_CHIPID_BCM5704_A3,
1956 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1957 "BCM5704 A3" },
1958
1959 { BGE_CHIPID_BCM5705_A0,
1960 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1961 "BCM5705 A0" },
1962
1963 { BGE_CHIPID_BCM5705_A1,
1964 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1965 "BCM5705 A1" },
1966
1967 { BGE_CHIPID_BCM5705_A2,
1968 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1969 "BCM5705 A2" },
1970
1971 { BGE_CHIPID_BCM5705_A3,
1972 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1973 "BCM5705 A3" },
1974
1975 { BGE_CHIPID_BCM5750_A0,
1976 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1977 "BCM5750 A0" },
1978
1979 { BGE_CHIPID_BCM5750_A1,
1980 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1981 "BCM5750 A1" },
1982
1983 { BGE_CHIPID_BCM5751_A1,
1984 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1985 "BCM5751 A1" },
1986
1987 { BGE_CHIPID_BCM5752_A0,
1988 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1989 "BCM5752 A0" },
1990
1991 { BGE_CHIPID_BCM5752_A1,
1992 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1993 "BCM5752 A1" },
1994
1995 { BGE_CHIPID_BCM5752_A2,
1996 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1997 "BCM5752 A2" },
1998
1999 { 0, 0, NULL }
2000 };
2001
2002 /*
2003 * Some defaults for major revisions, so that newer steppings
2004 * that we don't know about have a shot at working.
2005 */
2006 static const struct bge_revision bge_majorrevs[] = {
2007 { BGE_ASICREV_BCM5700,
2008 BGE_QUIRK_LINK_STATE_BROKEN,
2009 "unknown BCM5700" },
2010
2011 { BGE_ASICREV_BCM5701,
2012 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
2013 "unknown BCM5701" },
2014
2015 { BGE_ASICREV_BCM5703,
2016 0,
2017 "unknown BCM5703" },
2018
2019 { BGE_ASICREV_BCM5704,
2020 BGE_QUIRK_ONLY_PHY_1,
2021 "unknown BCM5704" },
2022
2023 { BGE_ASICREV_BCM5705,
2024 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2025 "unknown BCM5705" },
2026
2027 { BGE_ASICREV_BCM5750,
2028 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2029 "unknown BCM575x family" },
2030
2031 { BGE_ASICREV_BCM5714_A0,
2032 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2033 "unknown BCM5714" },
2034
2035 { BGE_ASICREV_BCM5714,
2036 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2037 "unknown BCM5714" },
2038
2039 { BGE_ASICREV_BCM5752,
2040 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2041 "unknown BCM5752 family" },
2042
2043
2044 { BGE_ASICREV_BCM5780,
2045 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2046 "unknown BCM5780" },
2047
2048 { 0,
2049 0,
2050 NULL }
2051 };
2052
2053
2054 static const struct bge_revision *
2055 bge_lookup_rev(uint32_t chipid)
2056 {
2057 const struct bge_revision *br;
2058
2059 for (br = bge_revisions; br->br_name != NULL; br++) {
2060 if (br->br_chipid == chipid)
2061 return (br);
2062 }
2063
2064 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2065 if (br->br_chipid == BGE_ASICREV(chipid))
2066 return (br);
2067 }
2068
2069 return (NULL);
2070 }
2071
2072 static const struct bge_product {
2073 pci_vendor_id_t bp_vendor;
2074 pci_product_id_t bp_product;
2075 const char *bp_name;
2076 } bge_products[] = {
2077 /*
2078 * The BCM5700 documentation seems to indicate that the hardware
2079 * still has the Alteon vendor ID burned into it, though it
2080 * should always be overridden by the value in the EEPROM. We'll
2081 * check for it anyway.
2082 */
2083 { PCI_VENDOR_ALTEON,
2084 PCI_PRODUCT_ALTEON_BCM5700,
2085 "Broadcom BCM5700 Gigabit Ethernet",
2086 },
2087 { PCI_VENDOR_ALTEON,
2088 PCI_PRODUCT_ALTEON_BCM5701,
2089 "Broadcom BCM5701 Gigabit Ethernet",
2090 },
2091
2092 { PCI_VENDOR_ALTIMA,
2093 PCI_PRODUCT_ALTIMA_AC1000,
2094 "Altima AC1000 Gigabit Ethernet",
2095 },
2096 { PCI_VENDOR_ALTIMA,
2097 PCI_PRODUCT_ALTIMA_AC1001,
2098 "Altima AC1001 Gigabit Ethernet",
2099 },
2100 { PCI_VENDOR_ALTIMA,
2101 PCI_PRODUCT_ALTIMA_AC9100,
2102 "Altima AC9100 Gigabit Ethernet",
2103 },
2104
2105 { PCI_VENDOR_BROADCOM,
2106 PCI_PRODUCT_BROADCOM_BCM5700,
2107 "Broadcom BCM5700 Gigabit Ethernet",
2108 },
2109 { PCI_VENDOR_BROADCOM,
2110 PCI_PRODUCT_BROADCOM_BCM5701,
2111 "Broadcom BCM5701 Gigabit Ethernet",
2112 },
2113 { PCI_VENDOR_BROADCOM,
2114 PCI_PRODUCT_BROADCOM_BCM5702,
2115 "Broadcom BCM5702 Gigabit Ethernet",
2116 },
2117 { PCI_VENDOR_BROADCOM,
2118 PCI_PRODUCT_BROADCOM_BCM5702X,
2119 "Broadcom BCM5702X Gigabit Ethernet" },
2120
2121 { PCI_VENDOR_BROADCOM,
2122 PCI_PRODUCT_BROADCOM_BCM5703,
2123 "Broadcom BCM5703 Gigabit Ethernet",
2124 },
2125 { PCI_VENDOR_BROADCOM,
2126 PCI_PRODUCT_BROADCOM_BCM5703X,
2127 "Broadcom BCM5703X Gigabit Ethernet",
2128 },
2129 { PCI_VENDOR_BROADCOM,
2130 PCI_PRODUCT_BROADCOM_BCM5703_ALT,
2131 "Broadcom BCM5703 Gigabit Ethernet",
2132 },
2133
2134 { PCI_VENDOR_BROADCOM,
2135 PCI_PRODUCT_BROADCOM_BCM5704C,
2136 "Broadcom BCM5704C Dual Gigabit Ethernet",
2137 },
2138 { PCI_VENDOR_BROADCOM,
2139 PCI_PRODUCT_BROADCOM_BCM5704S,
2140 "Broadcom BCM5704S Dual Gigabit Ethernet",
2141 },
2142
2143 { PCI_VENDOR_BROADCOM,
2144 PCI_PRODUCT_BROADCOM_BCM5705,
2145 "Broadcom BCM5705 Gigabit Ethernet",
2146 },
2147 { PCI_VENDOR_BROADCOM,
2148 PCI_PRODUCT_BROADCOM_BCM5705K,
2149 "Broadcom BCM5705K Gigabit Ethernet",
2150 },
2151 { PCI_VENDOR_BROADCOM,
2152 PCI_PRODUCT_BROADCOM_BCM5705M,
2153 "Broadcom BCM5705M Gigabit Ethernet",
2154 },
2155 { PCI_VENDOR_BROADCOM,
2156 PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
2157 "Broadcom BCM5705M Gigabit Ethernet",
2158 },
2159
2160 { PCI_VENDOR_BROADCOM,
2161 PCI_PRODUCT_BROADCOM_BCM5714,
2162 "Broadcom BCM5714/5715 Gigabit Ethernet",
2163 },
2164 { PCI_VENDOR_BROADCOM,
2165 PCI_PRODUCT_BROADCOM_BCM5789,
2166 "Broadcom BCM5789 Gigabit Ethernet",
2167 },
2168
2169 { PCI_VENDOR_BROADCOM,
2170 PCI_PRODUCT_BROADCOM_BCM5721,
2171 "Broadcom BCM5721 Gigabit Ethernet",
2172 },
2173
2174 { PCI_VENDOR_BROADCOM,
2175 PCI_PRODUCT_BROADCOM_BCM5750,
2176 "Broadcom BCM5750 Gigabit Ethernet",
2177 },
2178
2179 { PCI_VENDOR_BROADCOM,
2180 PCI_PRODUCT_BROADCOM_BCM5750M,
2181 "Broadcom BCM5750M Gigabit Ethernet",
2182 },
2183
2184 { PCI_VENDOR_BROADCOM,
2185 PCI_PRODUCT_BROADCOM_BCM5751,
2186 "Broadcom BCM5751 Gigabit Ethernet",
2187 },
2188
2189 { PCI_VENDOR_BROADCOM,
2190 PCI_PRODUCT_BROADCOM_BCM5751M,
2191 "Broadcom BCM5751M Gigabit Ethernet",
2192 },
2193
2194 { PCI_VENDOR_BROADCOM,
2195 PCI_PRODUCT_BROADCOM_BCM5752,
2196 "Broadcom BCM5752 Gigabit Ethernet",
2197 },
2198
2199 { PCI_VENDOR_BROADCOM,
2200 PCI_PRODUCT_BROADCOM_BCM5752M,
2201 "Broadcom BCM5752M Gigabit Ethernet",
2202 },
2203
2204 { PCI_VENDOR_BROADCOM,
2205 PCI_PRODUCT_BROADCOM_BCM5780,
2206 "Broadcom BCM5780 Gigabit Ethernet",
2207 },
2208
2209 { PCI_VENDOR_BROADCOM,
2210 PCI_PRODUCT_BROADCOM_BCM5780S,
2211 "Broadcom BCM5780S Gigabit Ethernet",
2212 },
2213
2214 { PCI_VENDOR_BROADCOM,
2215 PCI_PRODUCT_BROADCOM_BCM5782,
2216 "Broadcom BCM5782 Gigabit Ethernet",
2217 },
2218
2219 { PCI_VENDOR_BROADCOM,
2220 PCI_PRODUCT_BROADCOM_BCM5788,
2221 "Broadcom BCM5788 Gigabit Ethernet",
2222 },
2223 { PCI_VENDOR_BROADCOM,
2224 PCI_PRODUCT_BROADCOM_BCM5789,
2225 "Broadcom BCM5789 Gigabit Ethernet",
2226 },
2227
2228 { PCI_VENDOR_BROADCOM,
2229 PCI_PRODUCT_BROADCOM_BCM5901,
2230 "Broadcom BCM5901 Fast Ethernet",
2231 },
2232 { PCI_VENDOR_BROADCOM,
2233 PCI_PRODUCT_BROADCOM_BCM5901A2,
2234 "Broadcom BCM5901A2 Fast Ethernet",
2235 },
2236
2237 { PCI_VENDOR_SCHNEIDERKOCH,
2238 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2239 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2240 },
2241
2242 { PCI_VENDOR_3COM,
2243 PCI_PRODUCT_3COM_3C996,
2244 "3Com 3c996 Gigabit Ethernet",
2245 },
2246
2247 { 0,
2248 0,
2249 NULL },
2250 };
2251
2252 static const struct bge_product *
2253 bge_lookup(const struct pci_attach_args *pa)
2254 {
2255 const struct bge_product *bp;
2256
2257 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2258 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2259 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2260 return (bp);
2261 }
2262
2263 return (NULL);
2264 }
2265
2266 static int
2267 bge_setpowerstate(struct bge_softc *sc, int powerlevel)
2268 {
2269 #ifdef NOTYET
2270 u_int32_t pm_ctl = 0;
2271
2272 /* XXX FIXME: make sure indirect accesses enabled? */
2273 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2274 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2275 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2276
2277 /* clear the PME_assert bit and power state bits, enable PME */
2278 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2279 pm_ctl &= ~PCIM_PSTAT_DMASK;
2280 pm_ctl |= (1 << 8);
2281
2282 if (powerlevel == 0) {
2283 pm_ctl |= PCIM_PSTAT_D0;
2284 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2285 pm_ctl, 2);
2286 DELAY(10000);
2287 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2288 DELAY(10000);
2289
2290 #ifdef NOTYET
2291 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2292 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2293 #endif
2294 DELAY(40); DELAY(40); DELAY(40);
2295 DELAY(10000); /* above not quite adequate on 5700 */
2296 return 0;
2297 }
2298
2299
2300 /*
2301 * Entering ACPI power states D1-D3 is achieved by wiggling
2302 * GMII gpio pins. Example code assumes all hardware vendors
2303 	 * followed Broadcom's sample PCB layout. Until we verify that
2304 * for all supported OEM cards, states D1-D3 are unsupported.
2305 */
2306 printf("%s: power state %d unimplemented; check GPIO pins\n",
2307 sc->bge_dev.dv_xname, powerlevel);
2308 #endif
2309 return EOPNOTSUPP;
2310 }
2311
2312
2313 /*
2314 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2315  * against our list and return its name if we find a match. Note
2316  * that although the Broadcom controller has VPD support and
2317  * could supply its own device-name string, this version simply
2318  * reports the compiled-in name from the match table rather
2319  * than taking the slower VPD path.
2320  */
2321 static int
2322 bge_probe(device_t parent, cfdata_t match, void *aux)
2323 {
2324 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2325
2326 if (bge_lookup(pa) != NULL)
2327 return (1);
2328
2329 return (0);
2330 }
2331
2332 static void
2333 bge_attach(device_t parent, device_t self, void *aux)
2334 {
2335 struct bge_softc *sc = (struct bge_softc *)self;
2336 struct pci_attach_args *pa = aux;
2337 const struct bge_product *bp;
2338 const struct bge_revision *br;
2339 pci_chipset_tag_t pc = pa->pa_pc;
2340 pci_intr_handle_t ih;
2341 const char *intrstr = NULL;
2342 bus_dma_segment_t seg;
2343 int rseg;
2344 u_int32_t hwcfg = 0;
2345 u_int32_t mac_addr = 0;
2346 u_int32_t command;
2347 struct ifnet *ifp;
2348 caddr_t kva;
2349 u_char eaddr[ETHER_ADDR_LEN];
2350 pcireg_t memtype;
2351 bus_addr_t memaddr;
2352 bus_size_t memsize;
2353 u_int32_t pm_ctl;
2354
2355 bp = bge_lookup(pa);
2356 KASSERT(bp != NULL);
2357
2358 sc->bge_pa = *pa;
2359
2360 aprint_naive(": Ethernet controller\n");
2361 aprint_normal(": %s\n", bp->bp_name);
2362
2363 /*
2364 * Map control/status registers.
2365 */
2366 DPRINTFN(5, ("Map control/status regs\n"));
2367 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2368 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2369 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
2370 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2371
2372 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2373 aprint_error("%s: failed to enable memory mapping!\n",
2374 sc->bge_dev.dv_xname);
2375 return;
2376 }
2377
2378 DPRINTFN(5, ("pci_mem_find\n"));
2379 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2380 switch (memtype) {
2381 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2382 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2383 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2384 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2385 &memaddr, &memsize) == 0)
2386 break;
2387 default:
2388 aprint_error("%s: can't find mem space\n",
2389 sc->bge_dev.dv_xname);
2390 return;
2391 }
2392
2393 DPRINTFN(5, ("pci_intr_map\n"));
2394 if (pci_intr_map(pa, &ih)) {
2395 aprint_error("%s: couldn't map interrupt\n",
2396 sc->bge_dev.dv_xname);
2397 return;
2398 }
2399
2400 DPRINTFN(5, ("pci_intr_string\n"));
2401 intrstr = pci_intr_string(pc, ih);
2402
2403 DPRINTFN(5, ("pci_intr_establish\n"));
2404 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2405
2406 if (sc->bge_intrhand == NULL) {
2407 aprint_error("%s: couldn't establish interrupt",
2408 sc->bge_dev.dv_xname);
2409 if (intrstr != NULL)
2410 aprint_normal(" at %s", intrstr);
2411 aprint_normal("\n");
2412 return;
2413 }
2414 aprint_normal("%s: interrupting at %s\n",
2415 sc->bge_dev.dv_xname, intrstr);
2416
2417 /*
2418 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2419 * can clobber the chip's PCI config-space power control registers,
2420 * leaving the card in D3 powersave state.
2421 * We do not have memory-mapped registers in this state,
2422 * so force device into D0 state before starting initialization.
2423 */
2424 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2425 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2426 	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
2427 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2428 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2429
2430 /*
2431 * Save ASIC rev. Look up any quirks associated with this
2432 * ASIC.
2433 */
2434 sc->bge_chipid =
2435 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
2436 BGE_PCIMISCCTL_ASICREV;
2437
2438 /*
2439 * Detect PCI-Express devices
2440 * XXX: guessed from Linux/FreeBSD; no documentation
2441 */
2442 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
2443 NULL, NULL) != 0)
2444 sc->bge_pcie = 1;
2445 else
2446 sc->bge_pcie = 0;
2447
2448 /* Try to reset the chip. */
2449 DPRINTFN(5, ("bge_reset\n"));
2450 bge_reset(sc);
2451
2452 if (bge_chipinit(sc)) {
2453 aprint_error("%s: chip initialization failed\n",
2454 sc->bge_dev.dv_xname);
2455 bge_release_resources(sc);
2456 return;
2457 }
2458
2459 /*
2460 * Get station address from the EEPROM.
2461 */
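	/*
	 * 0x484b is ASCII "HK", apparently a signature the bootcode
	 * leaves at NIC-memory offset 0xc14 when it has stored a valid
	 * station address there; otherwise fall back to reading the
	 * address from the EEPROM.
	 */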
2462 mac_addr = bge_readmem_ind(sc, 0x0c14);
2463 if ((mac_addr >> 16) == 0x484b) {
2464 eaddr[0] = (u_char)(mac_addr >> 8);
2465 eaddr[1] = (u_char)(mac_addr >> 0);
2466 mac_addr = bge_readmem_ind(sc, 0x0c18);
2467 eaddr[2] = (u_char)(mac_addr >> 24);
2468 eaddr[3] = (u_char)(mac_addr >> 16);
2469 eaddr[4] = (u_char)(mac_addr >> 8);
2470 eaddr[5] = (u_char)(mac_addr >> 0);
2471 } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
2472 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2473 aprint_error("%s: failed to read station address\n",
2474 sc->bge_dev.dv_xname);
2475 bge_release_resources(sc);
2476 return;
2477 }
2478
2479 br = bge_lookup_rev(sc->bge_chipid);
2480 aprint_normal("%s: ", sc->bge_dev.dv_xname);
2481
2482 if (br == NULL) {
2483 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
2484 sc->bge_quirks = 0;
2485 } else {
2486 aprint_normal("ASIC %s (0x%04x)",
2487 br->br_name, sc->bge_chipid >> 16);
2488 sc->bge_quirks |= br->br_quirks;
2489 }
2490 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2491
2492 /* Allocate the general information block and ring buffers. */
2493 if (pci_dma64_available(pa))
2494 sc->bge_dmatag = pa->pa_dmat64;
2495 else
2496 sc->bge_dmatag = pa->pa_dmat;
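	/*
	 * Standard bus_dma(9) sequence: allocate DMA-safe memory, map
	 * it into kernel virtual address space, create a DMA map, and
	 * load the map with the address of the ring data.
	 */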
2497 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2498 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2499 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2500 aprint_error("%s: can't alloc rx buffers\n",
2501 sc->bge_dev.dv_xname);
2502 return;
2503 }
2504 DPRINTFN(5, ("bus_dmamem_map\n"));
2505 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2506 sizeof(struct bge_ring_data), &kva,
2507 BUS_DMA_NOWAIT)) {
2508 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
2509 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
2510 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2511 return;
2512 }
2513 	DPRINTFN(5, ("bus_dmamap_create\n"));
2514 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2515 sizeof(struct bge_ring_data), 0,
2516 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2517 aprint_error("%s: can't create DMA map\n",
2518 sc->bge_dev.dv_xname);
2519 bus_dmamem_unmap(sc->bge_dmatag, kva,
2520 sizeof(struct bge_ring_data));
2521 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2522 return;
2523 }
2524 	DPRINTFN(5, ("bus_dmamap_load\n"));
2525 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2526 sizeof(struct bge_ring_data), NULL,
2527 BUS_DMA_NOWAIT)) {
2528 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2529 bus_dmamem_unmap(sc->bge_dmatag, kva,
2530 sizeof(struct bge_ring_data));
2531 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2532 return;
2533 }
2534
2535 DPRINTFN(5, ("bzero\n"));
2536 sc->bge_rdata = (struct bge_ring_data *)kva;
2537
2538 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2539
2540 /* Try to allocate memory for jumbo buffers. */
2541 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2542 if (bge_alloc_jumbo_mem(sc)) {
2543 aprint_error("%s: jumbo buffer allocation failed\n",
2544 sc->bge_dev.dv_xname);
2545 } else
2546 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2547 }
2548
2549 /* Set default tuneable values. */
2550 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2551 sc->bge_rx_coal_ticks = 150;
2552 sc->bge_rx_max_coal_bds = 64;
2553 #ifdef ORIG_WPAUL_VALUES
2554 sc->bge_tx_coal_ticks = 150;
2555 sc->bge_tx_max_coal_bds = 128;
2556 #else
2557 sc->bge_tx_coal_ticks = 300;
2558 sc->bge_tx_max_coal_bds = 400;
2559 #endif
2560 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
2561 sc->bge_tx_coal_ticks = (12 * 5);
2562 sc->bge_rx_max_coal_bds = (12 * 5);
2563 aprint_verbose("%s: setting short Tx thresholds\n",
2564 sc->bge_dev.dv_xname);
2565 }
2566
2567 /* Set up ifnet structure */
2568 ifp = &sc->ethercom.ec_if;
2569 ifp->if_softc = sc;
2570 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2571 ifp->if_ioctl = bge_ioctl;
2572 ifp->if_start = bge_start;
2573 ifp->if_init = bge_init;
2574 ifp->if_watchdog = bge_watchdog;
2575 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2576 IFQ_SET_READY(&ifp->if_snd);
2577 DPRINTFN(5, ("strcpy if_xname\n"));
2578 strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
2579
2580 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2581 sc->ethercom.ec_if.if_capabilities |=
2582 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2583 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2584 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
2585 sc->ethercom.ec_capabilities |=
2586 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2587
2588 if (sc->bge_pcie)
2589 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
2590
2591 /*
2592 * Do MII setup.
2593 */
2594 DPRINTFN(5, ("mii setup\n"));
2595 sc->bge_mii.mii_ifp = ifp;
2596 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2597 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2598 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2599
2600 /*
2601 * Figure out what sort of media we have by checking the
2602 * hardware config word in the first 32k of NIC internal memory,
2603 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2604 * cards, this value appears to be unset. If that's the
2605 * case, we have to rely on identifying the NIC by its PCI
2606 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2607 */
2608 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2609 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2610 } else {
2611 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2612 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2613 hwcfg = be32toh(hwcfg);
2614 }
2615 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2616 sc->bge_tbi = 1;
2617
2618 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2619 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
2620 SK_SUBSYSID_9D41)
2621 sc->bge_tbi = 1;
2622
2623 if (sc->bge_tbi) {
2624 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2625 bge_ifmedia_sts);
2626 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2627 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2628 0, NULL);
2629 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2630 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2631 } else {
2632 /*
2633 * Do transceiver setup.
2634 */
2635 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2636 bge_ifmedia_sts);
2637 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2638 MII_PHY_ANY, MII_OFFSET_ANY,
2639 MIIF_FORCEANEG|MIIF_DOPAUSE);
2640
2641 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2642 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2643 ifmedia_add(&sc->bge_mii.mii_media,
2644 IFM_ETHER|IFM_MANUAL, 0, NULL);
2645 ifmedia_set(&sc->bge_mii.mii_media,
2646 IFM_ETHER|IFM_MANUAL);
2647 } else
2648 ifmedia_set(&sc->bge_mii.mii_media,
2649 IFM_ETHER|IFM_AUTO);
2650 }
2651
2652 /*
2653 * When using the BCM5701 in PCI-X mode, data corruption has
2654 * been observed in the first few bytes of some received packets.
2655 * Aligning the packet buffer in memory eliminates the corruption.
2656 * Unfortunately, this misaligns the packet payloads. On platforms
2657 * which do not support unaligned accesses, we will realign the
2658 * payloads by copying the received packets.
2659 */
2660 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2661 /* If in PCI-X mode, work around the alignment bug. */
2662 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2663 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2664 BGE_PCISTATE_PCI_BUSSPEED)
2665 sc->bge_rx_alignment_bug = 1;
2666 }
2667
2668 /*
2669 * Call MI attach routine.
2670 */
2671 DPRINTFN(5, ("if_attach\n"));
2672 if_attach(ifp);
2673 DPRINTFN(5, ("ether_ifattach\n"));
2674 ether_ifattach(ifp, eaddr);
2675 #ifdef BGE_EVENT_COUNTERS
2676 /*
2677 * Attach event counters.
2678 */
2679 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
2680 NULL, sc->bge_dev.dv_xname, "intr");
2681 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
2682 NULL, sc->bge_dev.dv_xname, "tx_xoff");
2683 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
2684 NULL, sc->bge_dev.dv_xname, "tx_xon");
2685 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
2686 NULL, sc->bge_dev.dv_xname, "rx_xoff");
2687 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
2688 NULL, sc->bge_dev.dv_xname, "rx_xon");
2689 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
2690 NULL, sc->bge_dev.dv_xname, "rx_macctl");
2691 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
2692 NULL, sc->bge_dev.dv_xname, "xoffentered");
2693 #endif /* BGE_EVENT_COUNTERS */
2694 DPRINTFN(5, ("callout_init\n"));
2695 callout_init(&sc->bge_timeout);
2696
2697 sc->bge_powerhook = powerhook_establish(sc->bge_dev.dv_xname,
2698 bge_powerhook, sc);
2699 if (sc->bge_powerhook == NULL)
2700 printf("%s: WARNING: unable to establish PCI power hook\n",
2701 sc->bge_dev.dv_xname);
2702 }
2703
2704 static void
2705 bge_release_resources(struct bge_softc *sc)
2706 {
2707 if (sc->bge_vpd_prodname != NULL)
2708 free(sc->bge_vpd_prodname, M_DEVBUF);
2709
2710 if (sc->bge_vpd_readonly != NULL)
2711 free(sc->bge_vpd_readonly, M_DEVBUF);
2712 }
2713
2714 static void
2715 bge_reset(struct bge_softc *sc)
2716 {
2717 struct pci_attach_args *pa = &sc->bge_pa;
2718 u_int32_t cachesize, command, pcistate, new_pcistate;
2719 int i, val;
2720
2721 /* Save some important PCI state. */
2722 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2723 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2724 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2725
2726 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2727 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2728 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2729
2730 /*
2731 * Disable the firmware fastboot feature on 5752 ASIC
2732 * to avoid firmware timeout.
2733 */
2734 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752)
2735 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2736
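	/*
	 * (65 << 1) apparently programs the core-clock prescaler so the
	 * chip's internal delay timer ticks at ~1MHz from a 66MHz core
	 * clock; the value is inherited from other BGE drivers.
	 */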
2737 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1);
2738 /*
2739 * XXX: from FreeBSD/Linux; no documentation
2740 */
2741 if (sc->bge_pcie) {
2742 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60)
2743 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20);
2744 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2745 /* No idea what that actually means */
2746 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2747 val |= (1<<29);
2748 }
2749 }
2750
2751 /* Issue global reset */
2752 bge_writereg_ind(sc, BGE_MISC_CFG, val);
2753
2754 DELAY(1000);
2755
2756 /*
2757 * XXX: from FreeBSD/Linux; no documentation
2758 */
2759 if (sc->bge_pcie) {
2760 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
2761 pcireg_t reg;
2762
2763 DELAY(500000);
2764 /* XXX: Magic Numbers */
2765 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0);
2766 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_UNKNOWN0,
2767 reg | (1 << 15));
2768 }
2769 /*
2770 * XXX: Magic Numbers.
2771 * Sets maximal PCI-e payload and clears any PCI-e errors.
2772 * Should be replaced with references to PCI config-space
2773 * capability block for PCI-Express.
2774 */
2775 pci_conf_write(pa->pa_pc, pa->pa_tag,
2776 BGE_PCI_CONF_DEV_CTRL, 0xf5000);
2777
2778 }
2779
2780 /* Reset some of the PCI state that got zapped by reset */
2781 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2782 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2783 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2784 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2785 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2786 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2787
2788 /* Enable memory arbiter. */
2789 {
2790 uint32_t marbmode = 0;
2791 if (BGE_IS_5714_FAMILY(sc)) {
2792 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
2793 }
2794 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
2795 }
2796
2797 /*
2798 * Prevent PXE restart: write a magic number to the
2799 * general communications memory at 0xB50.
2800 */
2801 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2802
2803 /*
2804 * Poll the value location we just wrote until
2805 * we see the 1's complement of the magic number.
2806 * This indicates that the firmware initialization
2807 * is complete.
2808 */
2809 for (i = 0; i < BGE_TIMEOUT; i++) {
2810 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2811 if (val == ~BGE_MAGIC_NUMBER)
2812 break;
2813 DELAY(1000);
2814 }
2815
2816 if (i >= BGE_TIMEOUT) {
2817 printf("%s: firmware handshake timed out, val = %x\n",
2818 sc->bge_dev.dv_xname, val);
2819 /*
2820 * XXX: occasionally fired on bcm5721, but without
2821 * apparent harm. For now, keep going if we timeout
2822 * against PCI-E devices.
2823 */
2824 if (!sc->bge_pcie)
2825 return;
2826 }
2827
2828 /*
2829 * XXX Wait for the value of the PCISTATE register to
2830 * return to its original pre-reset state. This is a
2831 * fairly good indicator of reset completion. If we don't
2832 * wait for the reset to fully complete, trying to read
2833 * from the device's non-PCI registers may yield garbage
2834 * results.
2835 */
2836 for (i = 0; i < BGE_TIMEOUT; i++) {
2837 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2838 BGE_PCI_PCISTATE);
2839 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2840 (pcistate & ~BGE_PCISTATE_RESERVED))
2841 break;
2842 DELAY(10);
2843 }
2844 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2845 (pcistate & ~BGE_PCISTATE_RESERVED)) {
2846 printf("%s: pcistate failed to revert\n",
2847 sc->bge_dev.dv_xname);
2848 }
2849
2850 /* XXX: from FreeBSD/Linux; no documentation */
2851 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0)
2852 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25));
2853
2854 /* Enable memory arbiter. */
2855 /* XXX why do this twice? */
2856 {
2857 uint32_t marbmode = 0;
2858 if (BGE_IS_5714_FAMILY(sc)) {
2859 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
2860 }
2861 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
2862 }
2863
2864 /* Fix up byte swapping */
2865 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2866
2867 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2868
2869 DELAY(10000);
2870 }
2871
2872 /*
2873 * Frame reception handling. This is called if there's a frame
2874 * on the receive return list.
2875 *
2876 * Note: we have to be able to handle two possibilities here:
2877  * 1) the frame is from the jumbo receive ring
2878 * 2) the frame is from the standard receive ring
2879 */
2880
2881 static void
2882 bge_rxeof(struct bge_softc *sc)
2883 {
2884 struct ifnet *ifp;
2885 int stdcnt = 0, jumbocnt = 0;
2886 bus_dmamap_t dmamap;
2887 bus_addr_t offset, toff;
2888 bus_size_t tlen;
2889 int tosync;
2890
2891 ifp = &sc->ethercom.ec_if;
2892
2893 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2894 offsetof(struct bge_ring_data, bge_status_block),
2895 sizeof (struct bge_status_block),
2896 BUS_DMASYNC_POSTREAD);
2897
2898 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2899 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2900 sc->bge_rx_saved_considx;
2901
2902 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2903
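	/*
	 * A negative count means the producer index has wrapped past
	 * the end of the return ring: sync the tail piece here and let
	 * the sync below cover the entries from the start of the ring.
	 */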
2904 if (tosync < 0) {
2905 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2906 sizeof (struct bge_rx_bd);
2907 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2908 toff, tlen, BUS_DMASYNC_POSTREAD);
2909 tosync = -tosync;
2910 }
2911
2912 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2913 offset, tosync * sizeof (struct bge_rx_bd),
2914 BUS_DMASYNC_POSTREAD);
2915
2916 	while (sc->bge_rx_saved_considx !=
2917 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2918 struct bge_rx_bd *cur_rx;
2919 u_int32_t rxidx;
2920 struct mbuf *m = NULL;
2921
2922 cur_rx = &sc->bge_rdata->
2923 bge_rx_return_ring[sc->bge_rx_saved_considx];
2924
2925 rxidx = cur_rx->bge_idx;
2926 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2927
2928 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2929 caddr_t buf;
2930 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2931 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2932 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2933 jumbocnt++;
2934 buf = m->m_ext.ext_buf;
2935 bus_dmamap_sync(sc->bge_dmatag,
2936 sc->bge_cdata.bge_rx_jumbo_map,
2937 buf - sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
2938 BUS_DMASYNC_POSTREAD);
2939 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2940 ifp->if_ierrors++;
2941 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2942 continue;
2943 }
2944 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2945 NULL)== ENOBUFS) {
2946 ifp->if_ierrors++;
2947 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2948 continue;
2949 }
2950 } else {
2951 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2952 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2953
2954 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2955 stdcnt++;
2956 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2957 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2958 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, MCLBYTES,
2959 BUS_DMASYNC_POSTREAD);
2960 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2961 ifp->if_ierrors++;
2962 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2963 continue;
2964 }
2965 if (bge_newbuf_std(sc, sc->bge_std,
2966 NULL, dmamap) == ENOBUFS) {
2967 ifp->if_ierrors++;
2968 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2969 continue;
2970 }
2971 }
2972
2973 ifp->if_ipackets++;
2974 #ifndef __NO_STRICT_ALIGNMENT
2975 /*
2976 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
2977 * the Rx buffer has the layer-2 header unaligned.
2978 * If our CPU requires alignment, re-align by copying.
2979 */
2980 if (sc->bge_rx_alignment_bug) {
2981 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
2982 cur_rx->bge_len);
2983 m->m_data += ETHER_ALIGN;
2984 }
2985 #endif
2986
2987 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2988 m->m_pkthdr.rcvif = ifp;
2989
2990 #if NBPFILTER > 0
2991 /*
2992 * Handle BPF listeners. Let the BPF user see the packet.
2993 */
2994 if (ifp->if_bpf)
2995 bpf_mtap(ifp->if_bpf, m);
2996 #endif
2997
2998 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
2999
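		/*
		 * The chip stores the ones-complement checksum, so a
		 * packet with a good IP header checksum reads back here
		 * as 0xffff.
		 */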
3000 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
3001 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
3002 /*
3003 * Rx transport checksum-offload may also
3004 * have bugs with packets which, when transmitted,
3005 * were `runts' requiring padding.
3006 */
3007 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3008 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
3009 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
3010 m->m_pkthdr.csum_data =
3011 cur_rx->bge_tcp_udp_csum;
3012 m->m_pkthdr.csum_flags |=
3013 (M_CSUM_TCPv4|M_CSUM_UDPv4|
3014 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
3015 }
3016
3017 /*
3018 * If we received a packet with a vlan tag, pass it
3019 * to vlan_input() instead of ether_input().
3020 */
3021 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG)
3022 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
3023
3024 (*ifp->if_input)(ifp, m);
3025 }
3026
3027 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3028 if (stdcnt)
3029 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3030 if (jumbocnt)
3031 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3032 }
3033
3034 static void
3035 bge_txeof(struct bge_softc *sc)
3036 {
3037 struct bge_tx_bd *cur_tx = NULL;
3038 struct ifnet *ifp;
3039 struct txdmamap_pool_entry *dma;
3040 bus_addr_t offset, toff;
3041 bus_size_t tlen;
3042 int tosync;
3043 struct mbuf *m;
3044
3045 ifp = &sc->ethercom.ec_if;
3046
3047 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3048 offsetof(struct bge_ring_data, bge_status_block),
3049 sizeof (struct bge_status_block),
3050 BUS_DMASYNC_POSTREAD);
3051
3052 offset = offsetof(struct bge_ring_data, bge_tx_ring);
3053 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
3054 sc->bge_tx_saved_considx;
3055
3056 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
3057
3058 if (tosync < 0) {
3059 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
3060 sizeof (struct bge_tx_bd);
3061 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3062 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3063 tosync = -tosync;
3064 }
3065
3066 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3067 offset, tosync * sizeof (struct bge_tx_bd),
3068 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3069
3070 /*
3071 * Go through our tx ring and free mbufs for those
3072 * frames that have been sent.
3073 */
3074 while (sc->bge_tx_saved_considx !=
3075 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
3076 		u_int32_t idx;
3077
3078 idx = sc->bge_tx_saved_considx;
3079 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
3080 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3081 ifp->if_opackets++;
3082 m = sc->bge_cdata.bge_tx_chain[idx];
3083 if (m != NULL) {
3084 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3085 dma = sc->txdma[idx];
3086 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
3087 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3088 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
3089 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
3090 sc->txdma[idx] = NULL;
3091
3092 m_freem(m);
3093 }
3094 sc->bge_txcnt--;
3095 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3096 ifp->if_timer = 0;
3097 }
3098
3099 if (cur_tx != NULL)
3100 ifp->if_flags &= ~IFF_OACTIVE;
3101 }
3102
3103 static int
3104 bge_intr(void *xsc)
3105 {
3106 struct bge_softc *sc;
3107 struct ifnet *ifp;
3108
3109 sc = xsc;
3110 ifp = &sc->ethercom.ec_if;
3111
3112 #ifdef notdef
3113 /* Avoid this for now -- checking this register is expensive. */
3114 /* Make sure this is really our interrupt. */
3115 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
3116 return (0);
3117 #endif
3118 	/* Ack interrupt and stop others from occurring. */
3119 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3120
3121 BGE_EVCNT_INCR(sc->bge_ev_intr);
3122
3123 /*
3124 * Process link state changes.
3125 * Grrr. The link status word in the status block does
3126 * not work correctly on the BCM5700 rev AX and BX chips,
3127 * according to all available information. Hence, we have
3128 * to enable MII interrupts in order to properly obtain
3129 * async link changes. Unfortunately, this also means that
3130 * we have to read the MAC status register to detect link
3131 * changes, thereby adding an additional register access to
3132 * the interrupt handler.
3133 */
3134
3135 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
3136 u_int32_t status;
3137
3138 status = CSR_READ_4(sc, BGE_MAC_STS);
3139 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3140 sc->bge_link = 0;
3141 callout_stop(&sc->bge_timeout);
3142 bge_tick(sc);
3143 /* Clear the interrupt */
3144 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3145 BGE_EVTENB_MI_INTERRUPT);
3146 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
3147 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
3148 BRGPHY_INTRS);
3149 }
3150 } else {
3151 if (sc->bge_rdata->bge_status_block.bge_status &
3152 BGE_STATFLAG_LINKSTATE_CHANGED) {
3153 sc->bge_link = 0;
3154 callout_stop(&sc->bge_timeout);
3155 bge_tick(sc);
3156 /* Clear the interrupt */
3157 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3158 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3159 BGE_MACSTAT_LINK_CHANGED);
3160 }
3161 }
3162
3163 if (ifp->if_flags & IFF_RUNNING) {
3164 /* Check RX return ring producer/consumer */
3165 bge_rxeof(sc);
3166
3167 /* Check TX ring producer/consumer */
3168 bge_txeof(sc);
3169 }
3170
3171 if (sc->bge_pending_rxintr_change) {
3172 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
3173 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
3174 uint32_t junk;
3175
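		/*
		 * Write the new coalescing values, then read each
		 * register back after a short delay; the dummy reads
		 * should flush the writes through any PCI write-posting.
		 */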
3176 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
3177 DELAY(10);
3178 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3179
3180 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
3181 DELAY(10);
3182 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3183
3184 sc->bge_pending_rxintr_change = 0;
3185 }
3186 bge_handle_events(sc);
3187
3188 /* Re-enable interrupts. */
3189 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3190
3191 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
3192 bge_start(ifp);
3193
3194 return (1);
3195 }
3196
3197 static void
3198 bge_tick(void *xsc)
3199 {
3200 struct bge_softc *sc = xsc;
3201 struct mii_data *mii = &sc->bge_mii;
3202 struct ifmedia *ifm = NULL;
3203 struct ifnet *ifp = &sc->ethercom.ec_if;
3204 int s;
3205
3206 s = splnet();
3207
3208 bge_stats_update(sc);
3209 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3210 if (sc->bge_link) {
3211 splx(s);
3212 return;
3213 }
3214
3215 if (sc->bge_tbi) {
3216 ifm = &sc->bge_ifmedia;
3217 if (CSR_READ_4(sc, BGE_MAC_STS) &
3218 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3219 sc->bge_link++;
3220 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3221 if (!IFQ_IS_EMPTY(&ifp->if_snd))
3222 bge_start(ifp);
3223 }
3224 splx(s);
3225 return;
3226 }
3227
3228 mii_tick(mii);
3229
3230 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3231 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3232 sc->bge_link++;
3233 if (!IFQ_IS_EMPTY(&ifp->if_snd))
3234 bge_start(ifp);
3235 }
3236
3237 splx(s);
3238 }
3239
3240 static void
3241 bge_stats_update(struct bge_softc *sc)
3242 {
3243 struct ifnet *ifp = &sc->ethercom.ec_if;
3244 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3245 bus_size_t rstats = BGE_RX_STATS;
3246
3247 #define READ_RSTAT(sc, stats, stat) \
3248 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
3249
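	/*
	 * 5705-class chips do not DMA a statistics block to host
	 * memory, so read their counters directly from the MAC
	 * statistics registers.
	 */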
3250 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
3251 ifp->if_collisions +=
3252 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
3253 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
3254 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
3255 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
3256
3257 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff,
3258 READ_RSTAT(sc, rstats, outXoffSent));
3259 BGE_EVCNT_ADD(sc->bge_ev_tx_xon,
3260 READ_RSTAT(sc, rstats, outXonSent));
3261 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff,
3262 READ_RSTAT(sc, rstats, xoffPauseFramesReceived));
3263 BGE_EVCNT_ADD(sc->bge_ev_rx_xon,
3264 READ_RSTAT(sc, rstats, xonPauseFramesReceived));
3265 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl,
3266 READ_RSTAT(sc, rstats, macControlFramesReceived));
3267 BGE_EVCNT_ADD(sc->bge_ev_xoffentered,
3268 READ_RSTAT(sc, rstats, xoffStateEntered));
3269 return;
3270 }
3271
3272 #undef READ_RSTAT
3273 #define READ_STAT(sc, stats, stat) \
3274 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3275
3276 ifp->if_collisions +=
3277 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3278 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3279 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3280 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3281 ifp->if_collisions;
3282
3283 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3284 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3285 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3286 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3287 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3288 READ_STAT(sc, stats,
3289 xoffPauseFramesReceived.bge_addr_lo));
3290 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3291 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3292 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3293 READ_STAT(sc, stats,
3294 macControlFramesReceived.bge_addr_lo));
3295 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3296 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3297
3298 #undef READ_STAT
3299
3300 #ifdef notdef
3301 ifp->if_collisions +=
3302 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3303 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3304 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3305 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3306 ifp->if_collisions;
3307 #endif
3308 }
3309
3310 /*
3311 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3312 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3313 * but when such padded frames employ the bge IP/TCP checksum offload,
3314 * the hardware checksum assist gives incorrect results (possibly
3315 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3316 * If we pad such runts with zeros, the onboard checksum comes out correct.
3317 */
3318 static inline int
3319 bge_cksum_pad(struct mbuf *pkt)
3320 {
3321 struct mbuf *last = NULL;
3322 int padlen;
3323
3324 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3325
3326 /* if there's only the packet-header and we can pad there, use it. */
3327 if (pkt->m_pkthdr.len == pkt->m_len &&
3328 M_TRAILINGSPACE(pkt) >= padlen) {
3329 last = pkt;
3330 } else {
3331 /*
3332 * Walk packet chain to find last mbuf. We will either
3333 * pad there, or append a new mbuf and pad it
3334 * (thus perhaps avoiding the bcm5700 dma-min bug).
3335 */
3336 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3337 continue; /* do nothing */
3338 }
3339
3340 /* `last' now points to last in chain. */
3341 if (M_TRAILINGSPACE(last) < padlen) {
3342 /* Allocate new empty mbuf, pad it. Compact later. */
3343 struct mbuf *n;
3344 			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
3345 			n->m_len = 0;
3346 last->m_next = n;
3347 last = n;
3348 }
3349 }
3350
3351 KDASSERT(!M_READONLY(last));
3352 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3353
3354 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3355 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3356 last->m_len += padlen;
3357 pkt->m_pkthdr.len += padlen;
3358 return 0;
3359 }
3360
3361 /*
3362 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3363 */
3364 static inline int
3365 bge_compact_dma_runt(struct mbuf *pkt)
3366 {
3367 struct mbuf *m, *prev;
3368 int totlen, prevlen;
3369
3370 prev = NULL;
3371 totlen = 0;
3372 prevlen = -1;
3373
3374 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3375 		int mlen = m->m_len;
3376 		int shortfall = 8 - mlen;
3377
3378 totlen += mlen;
3379 if (mlen == 0) {
3380 continue;
3381 }
3382 if (mlen >= 8)
3383 continue;
3384
3385 /* If we get here, mbuf data is too small for DMA engine.
3386 * Try to fix by shuffling data to prev or next in chain.
3387 * If that fails, do a compacting deep-copy of the whole chain.
3388 */
3389
3390 		/* Internal frag. If it fits in prev, copy it there. */
3391 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3392 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
3393 prev->m_len += mlen;
3394 m->m_len = 0;
3395 /* XXX stitch chain */
3396 prev->m_next = m_free(m);
3397 m = prev;
3398 continue;
3399 }
3400 else if (m->m_next != NULL &&
3401 M_TRAILINGSPACE(m) >= shortfall &&
3402 m->m_next->m_len >= (8 + shortfall)) {
3403 			/* m is writable and has enough data in next; pull up. */
3404
3405 memcpy(m->m_data + m->m_len, m->m_next->m_data,
3406 shortfall);
3407 m->m_len += shortfall;
3408 m->m_next->m_len -= shortfall;
3409 m->m_next->m_data += shortfall;
3410 }
3411 else if (m->m_next == NULL || 1) {
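			/*
			 * Note the "|| 1": this fallback currently
			 * handles every remaining runt, not only one at
			 * the very end of the packet.
			 */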
3412 /* Got a runt at the very end of the packet.
3413 			 * Borrow data from the tail of the preceding mbuf and
3414 * update its length in-place. (The original data is still
3415 * valid, so we can do this even if prev is not writable.)
3416 */
3417
3418 /* if we'd make prev a runt, just move all of its data. */
3419 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3420 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3421
3422 if ((prev->m_len - shortfall) < 8)
3423 shortfall = prev->m_len;
3424
3425 #ifdef notyet /* just do the safe slow thing for now */
3426 if (!M_READONLY(m)) {
3427 				if (M_LEADINGSPACE(m) < shortfall) {
3428 					void *m_dat;
3429 					m_dat = (m->m_flags & M_PKTHDR) ?
3430 					    m->m_pktdat : m->m_dat;
3431 memmove(m_dat, mtod(m, void*), m->m_len);
3432 m->m_data = m_dat;
3433 }
3434 } else
3435 #endif /* just do the safe slow thing */
3436 {
3437 struct mbuf * n = NULL;
3438 int newprevlen = prev->m_len - shortfall;
3439
3440 MGET(n, M_NOWAIT, MT_DATA);
3441 if (n == NULL)
3442 return ENOBUFS;
3443 KASSERT(m->m_len + shortfall < MLEN
3444 /*,
3445 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3446
3447 /* first copy the data we're stealing from prev */
3448 memcpy(n->m_data, prev->m_data + newprevlen,
3449 shortfall);
3450
3451 /* update prev->m_len accordingly */
3452 prev->m_len -= shortfall;
3453
3454 /* copy data from runt m */
3455 memcpy(n->m_data + shortfall, m->m_data,
3456 m->m_len);
3457
3458 /* n holds what we stole from prev, plus m */
3459 n->m_len = shortfall + m->m_len;
3460
3461 /* stitch n into chain and free m */
3462 n->m_next = m->m_next;
3463 prev->m_next = n;
3464 /* KASSERT(m->m_next == NULL); */
3465 m->m_next = NULL;
3466 m_free(m);
3467 m = n; /* for continuing loop */
3468 }
3469 }
3470 prevlen = m->m_len;
3471 }
3472 return 0;
3473 }
3474
3475 /*
3476 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3477 * pointers to descriptors.
3478 */
3479 static int
3480 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
3481 {
3482 struct bge_tx_bd *f = NULL;
3483 u_int32_t frag, cur;
3484 u_int16_t csum_flags = 0;
3485 u_int16_t txbd_tso_flags = 0;
3486 struct txdmamap_pool_entry *dma;
3487 bus_dmamap_t dmamap;
3488 int i = 0;
3489 struct m_tag *mtag;
3490 int use_tso, maxsegsize, error;
3491
3492 cur = frag = *txidx;
3493
3494 if (m_head->m_pkthdr.csum_flags) {
3495 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3496 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3497 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3498 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3499 }
3500
3501 /*
3502 * If we were asked to do an outboard checksum, and the NIC
3503 * has the bug where it sometimes adds in the Ethernet padding,
3504 * explicitly pad with zeros so the cksum will be correct either way.
3505 * (For now, do this for all chip versions, until newer
3506 * are confirmed to not require the workaround.)
3507 */
3508 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3509 #ifdef notyet
3510 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3511 #endif
3512 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3513 goto check_dma_bug;
3514
3515 if (bge_cksum_pad(m_head) != 0) {
3516 return ENOBUFS;
3517 }
3518
3519 check_dma_bug:
3520 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
3521 goto doit;
3522 /*
3523 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3524 * less than eight bytes. If we encounter a teeny mbuf
3525 * at the end of a chain, we can pad. Otherwise, copy.
3526 */
3527 if (bge_compact_dma_runt(m_head) != 0)
3528 return ENOBUFS;
3529
3530 doit:
3531 dma = SLIST_FIRST(&sc->txdma_list);
3532 if (dma == NULL)
3533 return ENOBUFS;
3534 dmamap = dma->dmamap;
3535
3536 /*
3537 * Set up any necessary TSO state before we start packing...
3538 */
3539 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
3540 if (!use_tso) {
3541 maxsegsize = 0;
3542 } else { /* TSO setup */
3543 unsigned mss;
3544 struct ether_header *eh;
3545 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
3546 struct mbuf * m0 = m_head;
3547 struct ip *ip;
3548 struct tcphdr *th;
3549 int iphl, hlen;
3550
3551 /*
3552 * XXX It would be nice if the mbuf pkthdr had offset
3553 * fields for the protocol headers.
3554 */
3555
3556 eh = mtod(m0, struct ether_header *);
3557 switch (htons(eh->ether_type)) {
3558 case ETHERTYPE_IP:
3559 offset = ETHER_HDR_LEN;
3560 break;
3561
3562 case ETHERTYPE_VLAN:
3563 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3564 break;
3565
3566 default:
3567 /*
3568 * Don't support this protocol or encapsulation.
3569 */
3570 return (ENOBUFS);
3571 }
3572
3573 /*
3574 * TCP/IP headers are in the first mbuf; we can do
3575 * this the easy way.
3576 */
3577 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
3578 hlen = iphl + offset;
3579 if (__predict_false(m0->m_len <
3580 (hlen + sizeof(struct tcphdr)))) {
3581
3582 printf("TSO: hard case m0->m_len == %d <"
3583 " ip/tcp hlen %zd, not handled yet\n",
3584 			    m0->m_len, hlen + sizeof(struct tcphdr));
3585 #ifdef NOTYET
3586 /*
3587 * XXX jonathan (at) NetBSD.org: untested.
3588 * how to force this branch to be taken?
3589 */
3590 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);
3591
3592 m_copydata(m0, offset, sizeof(ip), &ip);
3593 m_copydata(m0, hlen, sizeof(th), &th);
3594
3595 ip.ip_len = 0;
3596
3597 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
3598 sizeof(ip.ip_len), &ip.ip_len);
3599
3600 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
3601 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
3602
3603 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
3604 sizeof(th.th_sum), &th.th_sum);
3605
3606 hlen += th.th_off << 2;
3607 iptcp_opt_words = hlen;
3608 #else
3609 /*
3610 * if_wm "hard" case not yet supported, can we not
3611 * mandate it out of existence?
3612 */
3613 (void) ip; (void)th; (void) ip_tcp_hlen;
3614
3615 return ENOBUFS;
3616 #endif
3617 } else {
3618 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
3619 th = (struct tcphdr *) (mtod(m0, caddr_t) + hlen);
3620 ip_tcp_hlen = iphl + (th->th_off << 2);
3621
3622 /* Total IP/TCP options, in 32-bit words */
3623 iptcp_opt_words = (ip_tcp_hlen
3624 - sizeof(struct tcphdr)
3625 - sizeof(struct ip)) >> 2;
3626 }
3627 if (BGE_IS_5750_OR_BEYOND(sc)) {
3628 th->th_sum = 0;
3629 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
3630 } else {
3631 /*
3632 * XXX jonathan (at) NetBSD.org: 5705 untested.
3633 * Requires TSO firmware patch for 5701/5703/5704.
3634 */
3635 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3636 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3637 }
3638
3639 mss = m_head->m_pkthdr.segsz;
3640 txbd_tso_flags |=
3641 BGE_TXBDFLAG_CPU_PRE_DMA |
3642 BGE_TXBDFLAG_CPU_POST_DMA;
3643
3644 /*
3645 * Our NIC TSO-assist assumes TSO has standard, optionless
3646 * IPv4 and TCP headers, which total 40 bytes. By default,
3647 * the NIC copies 40 bytes of IP/TCP header from the
3648 * supplied header into the IP/TCP header portion of
3649 * each post-TSO-segment. If the supplied packet has IP or
3650 * TCP options, we need to tell the NIC to copy those extra
3651 * bytes into each post-TSO header, in addition to the normal
3652 * 40-byte IP/TCP header (and to leave space accordingly).
3653 * Unfortunately, the driver encoding of option length
3654 * varies across different ASIC families.
3655 */
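		/*
		 * E.g. with 3 option words: 5705-and-newer parts fold
		 * the count into the mss halfword (3 << 11 == 0x1800,
		 * OR'd into maxsegsize below), while older parts carry
		 * it in the descriptor flags word (3 << 12 == 0x3000).
		 */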
3656 tcp_seg_flags = 0;
		if (iptcp_opt_words) {
			if (BGE_IS_5705_OR_BEYOND(sc))
				tcp_seg_flags = iptcp_opt_words << 11;
			else
				txbd_tso_flags |= iptcp_opt_words << 12;
		}
3666 maxsegsize = mss | tcp_seg_flags;
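		/*
		 * Rewrite the IP datagram's total length to the
		 * per-segment value (mss plus IP/TCP headers) that
		 * each post-TSO segment will carry on the wire.
		 */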
3667 ip->ip_len = htons(mss + ip_tcp_hlen);
3668
3669 } /* TSO setup */
3670
3671 /*
3672 * Start packing the mbufs in this chain into
3673 * the fragment pointers. Stop when we run out
3674 * of fragments or hit the end of the mbuf chain.
3675 */
3676 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3677 BUS_DMA_NOWAIT);
3678 if (error) {
3679 return(ENOBUFS);
3680 }
3681 /*
3682 * Sanity check: avoid coming within 16 descriptors
3683 * of the end of the ring.
3684 */
3685 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
		BGE_TSO_PRINTF(("%s: dmamap_load_mbuf too close to ring wrap\n",
		    sc->bge_dev.dv_xname));
3689 goto fail_unload;
3690 }
3691
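	/*
	 * If any VLANs are configured on the interface, look for a
	 * VLAN mtag on this packet; if one is found, the tag value is
	 * written into each descriptor below so the chip inserts it.
	 */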
3692 mtag = sc->ethercom.ec_nvlans ?
3693 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3694
	/* Iterate over DMA-map fragments. */
3697 for (i = 0; i < dmamap->dm_nsegs; i++) {
3698 f = &sc->bge_rdata->bge_tx_ring[frag];
3699 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3700 break;
3701
3702 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3703 f->bge_len = dmamap->dm_segs[i].ds_len;
3704
		/*
		 * For TSO, supply the ASIC-revision-specific encoding
		 * of TSO flags and segsize: on 5750-and-newer parts
		 * (where checksum-assist was turned off above) every
		 * descriptor carries them; on older parts only the
		 * first descriptor does.
		 */
3711 if (use_tso) {
3712 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
3713 f->bge_rsvd = maxsegsize;
3714 f->bge_flags = csum_flags | txbd_tso_flags;
3715 } else {
3716 f->bge_rsvd = 0;
3717 f->bge_flags =
3718 (csum_flags | txbd_tso_flags) & 0x0fff;
3719 }
3720 } else {
3721 f->bge_rsvd = 0;
3722 f->bge_flags = csum_flags;
3723 }
3724
3725 if (mtag != NULL) {
3726 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3727 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3728 } else {
3729 f->bge_vlan_tag = 0;
3730 }
3731 cur = frag;
3732 BGE_INC(frag, BGE_TX_RING_CNT);
3733 }
3734
3735 if (i < dmamap->dm_nsegs) {
3736 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
3737 sc->bge_dev.dv_xname, i, dmamap->dm_nsegs));
3738 goto fail_unload;
3739 }
3740
3741 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3742 BUS_DMASYNC_PREWRITE);
3743
3744 if (frag == sc->bge_tx_saved_considx) {
3745 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
3746 sc->bge_dev.dv_xname, frag, sc->bge_tx_saved_considx));
3747
3748 goto fail_unload;
3749 }
3750
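	/*
	 * 'cur' indexes the descriptor of the final fragment, which
	 * gets the END flag; 'frag' is the next free producer slot,
	 * handed back to the caller via *txidx.
	 */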
3751 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3752 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3753 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3754 sc->txdma[cur] = dma;
3755 sc->bge_txcnt += dmamap->dm_nsegs;
3756
3757 *txidx = frag;
3758
3759 return(0);
3760
3761 fail_unload:
3762 bus_dmamap_unload(sc->bge_dmatag, dmamap);
3763
3764 return ENOBUFS;
3765 }
3766
3767 /*
3768 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3769 * to the mbuf data regions directly in the transmit descriptors.
3770 */
3771 static void
3772 bge_start(struct ifnet *ifp)
3773 {
3774 struct bge_softc *sc;
3775 struct mbuf *m_head = NULL;
3776 u_int32_t prodidx;
3777 int pkts = 0;
3778
3779 sc = ifp->if_softc;
3780
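	/*
	 * With no link yet, defer transmission unless a sizable
	 * backlog (10 or more packets) has accumulated; the intent
	 * is apparently to avoid queueing work while the link is
	 * still coming up.
	 */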
3781 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3782 return;
3783
3784 prodidx = sc->bge_tx_prodidx;
3785
	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3787 IFQ_POLL(&ifp->if_snd, m_head);
3788 if (m_head == NULL)
3789 break;
3790
3791 #if 0
3792 /*
3793 * XXX
3794 * safety overkill. If this is a fragmented packet chain
3795 * with delayed TCP/UDP checksums, then only encapsulate
3796 * it if we have enough descriptors to handle the entire
3797 * chain at once.
3798 * (paranoia -- may not actually be needed)
3799 */
3800 if (m_head->m_flags & M_FIRSTFRAG &&
3801 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3802 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3803 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
3804 ifp->if_flags |= IFF_OACTIVE;
3805 break;
3806 }
3807 }
3808 #endif
3809
3810 /*
3811 * Pack the data into the transmit ring. If we
3812 * don't have room, set the OACTIVE flag and wait
3813 * for the NIC to drain the ring.
3814 */
3815 if (bge_encap(sc, m_head, &prodidx)) {
3816 printf("bge: failed on len %d?\n", m_head->m_pkthdr.len);
3817 ifp->if_flags |= IFF_OACTIVE;
3818 break;
3819 }
3820
3821 /* now we are committed to transmit the packet */
3822 IFQ_DEQUEUE(&ifp->if_snd, m_head);
3823 pkts++;
3824
3825 #if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
3830 if (ifp->if_bpf)
3831 bpf_mtap(ifp->if_bpf, m_head);
3832 #endif
3833 }
3834 if (pkts == 0)
3835 return;
3836
3837 /* Transmit */
3838 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3839 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
3840 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3841
3842 sc->bge_tx_prodidx = prodidx;
3843
3844 /*
3845 * Set a timeout in case the chip goes out to lunch.
3846 */
3847 ifp->if_timer = 5;
3848 }
3849
3850 static int
3851 bge_init(struct ifnet *ifp)
3852 {
3853 struct bge_softc *sc = ifp->if_softc;
3854 u_int16_t *m;
3855 int s, error;
3856
3857 s = splnet();
3858
3859 ifp = &sc->ethercom.ec_if;
3860
3861 /* Cancel pending I/O and flush buffers. */
3862 bge_stop(sc);
3863 bge_reset(sc);
3864 bge_chipinit(sc);
3865
3866 /*
3867 * Init the various state machines, ring
3868 * control blocks and firmware.
3869 */
3870 error = bge_blockinit(sc);
3871 if (error != 0) {
3872 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
3873 error);
3874 splx(s);
3875 return error;
3876 }
3877
3880 /* Specify MTU. */
3881 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3882 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
3883
3884 /* Load our MAC address. */
3885 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
3886 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3887 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3888
3889 /* Enable or disable promiscuous mode as needed. */
3890 if (ifp->if_flags & IFF_PROMISC) {
3891 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3892 } else {
3893 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3894 }
3895
3896 /* Program multicast filter. */
3897 bge_setmulti(sc);
3898
3899 /* Init RX ring. */
3900 bge_init_rx_ring_std(sc);
3901
3902 /* Init jumbo RX ring. */
3903 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3904 bge_init_rx_ring_jumbo(sc);
3905
3906 /* Init our RX return ring index */
3907 sc->bge_rx_saved_considx = 0;
3908
3909 /* Init TX ring. */
3910 bge_init_tx_ring(sc);
3911
3912 /* Turn on transmitter */
3913 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3914
3915 /* Turn on receiver */
3916 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3917
3918 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3919
3920 /* Tell firmware we're alive. */
3921 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3922
3923 /* Enable host interrupts. */
3924 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3925 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3926 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3927
3928 bge_ifmedia_upd(ifp);
3929
3930 ifp->if_flags |= IFF_RUNNING;
3931 ifp->if_flags &= ~IFF_OACTIVE;
3932
3933 splx(s);
3934
3935 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3936
3937 return 0;
3938 }
3939
3940 /*
3941 * Set media options.
3942 */
3943 static int
3944 bge_ifmedia_upd(struct ifnet *ifp)
3945 {
3946 struct bge_softc *sc = ifp->if_softc;
3947 struct mii_data *mii = &sc->bge_mii;
3948 struct ifmedia *ifm = &sc->bge_ifmedia;
3949
3950 /* If this is a 1000baseX NIC, enable the TBI port. */
3951 if (sc->bge_tbi) {
3952 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3953 return(EINVAL);
3954 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3955 case IFM_AUTO:
3956 break;
3957 case IFM_1000_SX:
3958 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3959 BGE_CLRBIT(sc, BGE_MAC_MODE,
3960 BGE_MACMODE_HALF_DUPLEX);
3961 } else {
3962 BGE_SETBIT(sc, BGE_MAC_MODE,
3963 BGE_MACMODE_HALF_DUPLEX);
3964 }
3965 break;
3966 default:
3967 return(EINVAL);
3968 }
3969 /* XXX 802.3x flow control for 1000BASE-SX */
3970 return(0);
3971 }
3972
3973 sc->bge_link = 0;
3974 mii_mediachg(mii);
3975
3976 return(0);
3977 }
3978
3979 /*
3980 * Report current media status.
3981 */
3982 static void
3983 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3984 {
3985 struct bge_softc *sc = ifp->if_softc;
3986 struct mii_data *mii = &sc->bge_mii;
3987
3988 if (sc->bge_tbi) {
3989 ifmr->ifm_status = IFM_AVALID;
3990 ifmr->ifm_active = IFM_ETHER;
3991 if (CSR_READ_4(sc, BGE_MAC_STS) &
3992 BGE_MACSTAT_TBI_PCS_SYNCHED)
3993 ifmr->ifm_status |= IFM_ACTIVE;
3994 ifmr->ifm_active |= IFM_1000_SX;
3995 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3996 ifmr->ifm_active |= IFM_HDX;
3997 else
3998 ifmr->ifm_active |= IFM_FDX;
3999 return;
4000 }
4001
4002 mii_pollstat(mii);
4003 ifmr->ifm_status = mii->mii_media_status;
4004 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4005 sc->bge_flowflags;
4006 }
4007
4008 static int
4009 bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
4010 {
4011 struct bge_softc *sc = ifp->if_softc;
4012 struct ifreq *ifr = (struct ifreq *) data;
4013 int s, error = 0;
4014 struct mii_data *mii;
4015
4016 s = splnet();
4017
4018 switch(command) {
4019 case SIOCSIFFLAGS:
4020 if (ifp->if_flags & IFF_UP) {
4021 /*
4022 * If only the state of the PROMISC flag changed,
4023 * then just use the 'set promisc mode' command
4024 * instead of reinitializing the entire NIC. Doing
4025 * a full re-init means reloading the firmware and
4026 * waiting for it to start up, which may take a
4027 * second or two.
4028 */
4029 if (ifp->if_flags & IFF_RUNNING &&
4030 ifp->if_flags & IFF_PROMISC &&
4031 !(sc->bge_if_flags & IFF_PROMISC)) {
4032 BGE_SETBIT(sc, BGE_RX_MODE,
4033 BGE_RXMODE_RX_PROMISC);
4034 } else if (ifp->if_flags & IFF_RUNNING &&
4035 !(ifp->if_flags & IFF_PROMISC) &&
4036 sc->bge_if_flags & IFF_PROMISC) {
4037 BGE_CLRBIT(sc, BGE_RX_MODE,
4038 BGE_RXMODE_RX_PROMISC);
4039 } else if (!(sc->bge_if_flags & IFF_UP))
4040 bge_init(ifp);
4041 } else {
4042 if (ifp->if_flags & IFF_RUNNING) {
4043 bge_stop(sc);
4044 }
4045 }
4046 sc->bge_if_flags = ifp->if_flags;
4047 error = 0;
4048 break;
4049 case SIOCSIFMEDIA:
4050 /* XXX Flow control is not supported for 1000BASE-SX */
4051 if (sc->bge_tbi) {
4052 ifr->ifr_media &= ~IFM_ETH_FMASK;
4053 sc->bge_flowflags = 0;
4054 }
4055
4056 /* Flow control requires full-duplex mode. */
4057 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4058 (ifr->ifr_media & IFM_FDX) == 0) {
4059 ifr->ifr_media &= ~IFM_ETH_FMASK;
4060 }
4061 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4062 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
4064 ifr->ifr_media |=
4065 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4066 }
4067 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4068 }
4069 /* FALLTHROUGH */
4070 case SIOCGIFMEDIA:
4071 if (sc->bge_tbi) {
4072 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4073 command);
4074 } else {
4075 mii = &sc->bge_mii;
4076 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4077 command);
4078 }
4079 break;
4080 default:
4081 error = ether_ioctl(ifp, command, data);
4082 if (error == ENETRESET) {
4083 if (ifp->if_flags & IFF_RUNNING)
4084 bge_setmulti(sc);
4085 error = 0;
4086 }
4087 break;
4088 }
4089
4090 splx(s);
4091
4092 return(error);
4093 }
4094
4095 static void
4096 bge_watchdog(struct ifnet *ifp)
4097 {
4098 struct bge_softc *sc;
4099
4100 sc = ifp->if_softc;
4101
4102 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
4103
4104 ifp->if_flags &= ~IFF_RUNNING;
4105 bge_init(ifp);
4106
4107 ifp->if_oerrors++;
4108 }
4109
4110 static void
4111 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4112 {
4113 int i;
4114
4115 BGE_CLRBIT(sc, reg, bit);
4116
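	/* Poll until the block's enable bit reads back clear. */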
4117 for (i = 0; i < BGE_TIMEOUT; i++) {
4118 if ((CSR_READ_4(sc, reg) & bit) == 0)
4119 return;
4120 delay(100);
4121 if (sc->bge_pcie)
4122 DELAY(1000);
4123 }
4124
4125 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
4126 sc->bge_dev.dv_xname, (u_long) reg, bit);
4127 }
4128
4129 /*
4130 * Stop the adapter and free any mbufs allocated to the
4131 * RX and TX lists.
4132 */
4133 static void
4134 bge_stop(struct bge_softc *sc)
4135 {
4136 struct ifnet *ifp = &sc->ethercom.ec_if;
4137
4138 callout_stop(&sc->bge_timeout);
4139
4140 /*
4141 * Disable all of the receiver blocks
4142 */
4143 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4144 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4145 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4146 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4147 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4148 }
4149 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4150 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4151 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4152
4153 /*
4154 * Disable all of the transmit blocks
4155 */
4156 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4157 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4158 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4159 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4160 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4161 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4162 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4163 }
4164 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4165
4166 /*
4167 * Shut down all of the memory managers and related
4168 * state machines.
4169 */
4170 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4171 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4172 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4173 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4174 }
4175
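	/*
	 * Pulse the flow-through-queue reset register: all ones,
	 * then zero.
	 */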
4176 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4177 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4178
4179 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4180 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4181 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4182 }
4183
4184 /* Disable host interrupts. */
4185 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4186 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
4187
4188 /*
4189 * Tell firmware we're shutting down.
4190 */
4191 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4192
4193 /* Free the RX lists. */
4194 bge_free_rx_ring_std(sc);
4195
4196 /* Free jumbo RX list. */
4197 bge_free_rx_ring_jumbo(sc);
4198
4199 /* Free TX buffers. */
4200 bge_free_tx_ring(sc);
4201
4202 /*
4203 * Isolate/power down the PHY.
4204 */
4205 if (!sc->bge_tbi)
4206 mii_down(&sc->bge_mii);
4207
4208 sc->bge_link = 0;
4209
4210 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4211
4212 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4213 }
4214
4215 /*
4216 * Stop all chip I/O so that the kernel's probe routines don't
4217 * get confused by errant DMAs when rebooting.
4218 */
4219 static void
4220 bge_shutdown(void *xsc)
4221 {
4222 struct bge_softc *sc = (struct bge_softc *)xsc;
4223
4224 bge_stop(sc);
4225 bge_reset(sc);
4226 }
4227
4229 static int
4230 sysctl_bge_verify(SYSCTLFN_ARGS)
4231 {
4232 int error, t;
4233 struct sysctlnode node;
4234
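	/*
	 * Work on a copy of the node pointed at a local copy of the
	 * value, so sysctl_lookup() cannot touch the real variable
	 * until the new value has been range-checked below.
	 */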
4235 node = *rnode;
4236 t = *(int*)rnode->sysctl_data;
4237 node.sysctl_data = &t;
4238 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4239 if (error || newp == NULL)
4240 return (error);
4241
4242 #if 0
4243 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
4244 node.sysctl_num, rnode->sysctl_num));
4245 #endif
4246
4247 if (node.sysctl_num == bge_rxthresh_nodenum) {
4248 if (t < 0 || t >= NBGE_RX_THRESH)
4249 return (EINVAL);
4250 bge_update_all_threshes(t);
4251 } else
4252 return (EINVAL);
4253
4254 *(int*)rnode->sysctl_data = t;
4255
4256 return (0);
4257 }
4258
4259 /*
4260 * Set up sysctl(3) MIB, hw.bge.*.
4261 *
 * TBD: make CTLFLAG_PERMANENT conditional on whether this is an LKM.
4263 */
4264 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
4265 {
4266 int rc, bge_root_num;
4267 const struct sysctlnode *node;
4268
4269 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
4270 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
4271 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
4272 goto err;
4273 }
4274
4275 if ((rc = sysctl_createv(clog, 0, NULL, &node,
4276 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
4277 SYSCTL_DESCR("BGE interface controls"),
4278 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
4279 goto err;
4280 }
4281
4282 bge_root_num = node->sysctl_num;
4283
4284 /* BGE Rx interrupt mitigation level */
4285 if ((rc = sysctl_createv(clog, 0, NULL, &node,
4286 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4287 CTLTYPE_INT, "rx_lvl",
4288 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
4289 sysctl_bge_verify, 0,
4290 &bge_rx_thresh_lvl,
4291 0, CTL_HW, bge_root_num, CTL_CREATE,
4292 CTL_EOL)) != 0) {
4293 goto err;
4294 }
4295
4296 bge_rxthresh_nodenum = node->sysctl_num;
4297
4298 return;
4299
4300 err:
4301 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
4302 }
4303
4304 static void
4305 bge_powerhook(int why, void *hdl)
4306 {
4307 struct bge_softc *sc = (struct bge_softc *)hdl;
4308 struct ifnet *ifp = &sc->ethercom.ec_if;
4309 struct pci_attach_args *pa = &(sc->bge_pa);
4310 pci_chipset_tag_t pc = pa->pa_pc;
4311 pcitag_t tag = pa->pa_tag;
4312
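	/*
	 * The soft hooks run while the bus is still alive: quiesce
	 * the chip on the way down and reinitialize on the way up.
	 * The hard PWR_SUSPEND/PWR_RESUME events just save and
	 * restore PCI configuration space around the transition.
	 */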
4313 switch (why) {
4314 case PWR_SOFTSUSPEND:
4315 case PWR_SOFTSTANDBY:
4316 bge_shutdown(sc);
4317 break;
4318 case PWR_SOFTRESUME:
4319 if (ifp->if_flags & IFF_UP) {
4320 ifp->if_flags &= ~IFF_RUNNING;
4321 bge_init(ifp);
4322 }
4323 break;
4324 case PWR_SUSPEND:
4325 case PWR_STANDBY:
4326 pci_conf_capture(pc, tag, &sc->bge_pciconf);
4327 break;
4328 case PWR_RESUME:
4329 pci_conf_restore(pc, tag, &sc->bge_pciconf);
4330 break;
4331 }
4334 }
4335