1 /*	$NetBSD: if_bge.c,v 1.75 2004/09/29 11:22:03 yamt Exp $	*/
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl@wasabisystems.com>
44 * Jason Thorpe <thorpej@wasabisystems.com>
45 * Jonathan Stone <jonathan@dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul@windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.75 2004/09/29 11:22:03 yamt Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/device.h>
95 #include <sys/socket.h>
96 #include <sys/sysctl.h>
97
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_ether.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #endif
109
110 #if NBPFILTER > 0
111 #include <net/bpf.h>
112 #endif
113
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122
123 #include <dev/pci/if_bgereg.h>
124
125 #include <uvm/uvm_extern.h>
126
127 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
128
129
130 /*
131 * Tunable thresholds for rx-side bge interrupt mitigation.
132 */
133
134 /*
135 * The pairs of values below were obtained from empirical measurement
136 * on bcm5700 rev B2; they are designed to give roughly 1 receive
137 * interrupt for every N packets received, where N is, approximately,
138 * the second value (rx_max_bds) in each pair. The values are chosen
139 * such that moving from one pair to the succeeding pair was observed
140 * to roughly halve interrupt rate under sustained input packet load.
141 * The values were empirically chosen to avoid overflowing internal
142 * limits on the bcm5700: increasing rx_ticks much beyond 600
143 * results in internal wrapping and higher interrupt rates.
144 * The limit of 46 frames was chosen to match NFS workloads.
145 *
146 * These values also work well on bcm5701, bcm5704C, and (less
147 * tested) bcm5703. On other chipsets (including the Altima chip
148 * family), the larger values may overflow internal chip limits,
149 * leading to increasing interrupt rates rather than lower interrupt
150 * rates.
151 *
152 * Applications using heavy interrupt mitigation (interrupting every
153 * 32 or 46 frames) in both directions may need to increase the TCP
154 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
155 * full link bandwidth, due to ACKs and window updates lingering
156 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
157 */
158 struct bge_load_rx_thresh {
159 int rx_ticks;
160 int rx_max_bds; }
161 bge_rx_threshes[] = {
162 { 32, 2 },
163 { 50, 4 },
164 { 100, 8 },
165 { 192, 16 },
166 { 416, 32 },
167 { 598, 46 }
168 };
169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
170
171 /* XXX patchable; should be sysctl'able */
172 static int bge_auto_thresh = 1;
173 static int bge_rx_thresh_lvl;
174
175 #ifdef __NetBSD__
176 static int bge_rxthresh_nodenum;
177 #endif /* __NetBSD__ */
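/*
 * Illustrative sketch only (not part of the driver, hence "notdef"):
 * a rough estimate of the receive-interrupt rate a given mitigation
 * level yields, assuming the chip interrupts after either rx_max_bds
 * frames or rx_ticks coalescing ticks (taken here to be microseconds),
 * whichever threshold trips first. For example, at roughly 81,000
 * frames/sec (1 Gbit/s of 1500-byte frames), level 5 (598 ticks /
 * 46 frames) works out to roughly 1,800 interrupts/sec. The helper
 * name is hypothetical.
 */
#ifdef notdef
static int
bge_est_rx_intr_rate(int frames_per_sec, int lvl)
{
	int by_frames, by_ticks;

	if (lvl < 0 || lvl >= NBGE_RX_THRESH)
		return (-1);
	/* Rate if only the frame-count threshold mattered. */
	by_frames = frames_per_sec / bge_rx_threshes[lvl].rx_max_bds;
	/* Rate if only the tick threshold mattered. */
	by_ticks = 1000000 / bge_rx_threshes[lvl].rx_ticks;
	/* The threshold that trips first generates more interrupts. */
	return (by_frames > by_ticks ? by_frames : by_ticks);
}
#endif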
178
179 int bge_probe(struct device *, struct cfdata *, void *);
180 void bge_attach(struct device *, struct device *, void *);
181 void bge_release_resources(struct bge_softc *);
182 void bge_txeof(struct bge_softc *);
183 void bge_rxeof(struct bge_softc *);
184
185 void bge_tick(void *);
186 void bge_stats_update(struct bge_softc *);
187 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
188 static __inline int bge_cksum_pad(struct mbuf *pkt);
189 static __inline int bge_compact_dma_runt(struct mbuf *pkt);
190
191 int bge_intr(void *);
192 void bge_start(struct ifnet *);
193 int bge_ioctl(struct ifnet *, u_long, caddr_t);
194 int bge_init(struct ifnet *);
195 void bge_stop(struct bge_softc *);
196 void bge_watchdog(struct ifnet *);
197 void bge_shutdown(void *);
198 int bge_ifmedia_upd(struct ifnet *);
199 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
200
201 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
202 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
203
204 void bge_setmulti(struct bge_softc *);
205
206 void bge_handle_events(struct bge_softc *);
207 int bge_alloc_jumbo_mem(struct bge_softc *);
208 void bge_free_jumbo_mem(struct bge_softc *);
209 void *bge_jalloc(struct bge_softc *);
210 void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
211 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
212 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
213 int bge_init_rx_ring_std(struct bge_softc *);
214 void bge_free_rx_ring_std(struct bge_softc *);
215 int bge_init_rx_ring_jumbo(struct bge_softc *);
216 void bge_free_rx_ring_jumbo(struct bge_softc *);
217 void bge_free_tx_ring(struct bge_softc *);
218 int bge_init_tx_ring(struct bge_softc *);
219
220 int bge_chipinit(struct bge_softc *);
221 int bge_blockinit(struct bge_softc *);
222 int bge_setpowerstate(struct bge_softc *, int);
223
224 #ifdef notdef
225 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
226 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
227 void bge_vpd_read(struct bge_softc *);
228 #endif
229
230 u_int32_t bge_readmem_ind(struct bge_softc *, int);
231 void bge_writemem_ind(struct bge_softc *, int, int);
232 #ifdef notdef
233 u_int32_t bge_readreg_ind(struct bge_softc *, int);
234 #endif
235 void bge_writereg_ind(struct bge_softc *, int, int);
236
237 int bge_miibus_readreg(struct device *, int, int);
238 void bge_miibus_writereg(struct device *, int, int, int);
239 void bge_miibus_statchg(struct device *);
240
241 void bge_reset(struct bge_softc *);
242
243 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/);
244 void bge_update_all_threshes(int /*lvl*/);
245
246 void bge_dump_status(struct bge_softc *);
247 void bge_dump_rxbd(struct bge_rx_bd *);
248
249 #define BGE_DEBUG
250 #ifdef BGE_DEBUG
251 #define DPRINTF(x) if (bgedebug) printf x
252 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x
253 int bgedebug = 0;
254 #else
255 #define DPRINTF(x)
256 #define DPRINTFN(n,x)
257 #endif
258
259 #ifdef BGE_EVENT_COUNTERS
260 #define BGE_EVCNT_INCR(ev) (ev).ev_count++
261 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
262 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
263 #else
264 #define BGE_EVCNT_INCR(ev) /* nothing */
265 #define BGE_EVCNT_ADD(ev, val) /* nothing */
266 #define BGE_EVCNT_UPD(ev, val) /* nothing */
267 #endif
268
269 /* Various chip quirks. */
270 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001
271 #define BGE_QUIRK_CSUM_BROKEN 0x00000002
272 #define BGE_QUIRK_ONLY_PHY_1 0x00000004
273 #define BGE_QUIRK_5700_SMALLDMA 0x00000008
274 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010
275 #define BGE_QUIRK_PRODUCER_BUG 0x00000020
276 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040
277 #define BGE_QUIRK_5705_CORE 0x00000080
278 #define BGE_QUIRK_FEWER_MBUFS 0x00000100
279
280 /* following bugs are common to bcm5700 rev B, all flavours */
281 #define BGE_QUIRK_5700_COMMON \
282 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
283
284 CFATTACH_DECL(bge, sizeof(struct bge_softc),
285 bge_probe, bge_attach, NULL, NULL);
286
287 u_int32_t
288 bge_readmem_ind(sc, off)
289 struct bge_softc *sc;
290 int off;
291 {
292 struct pci_attach_args *pa = &(sc->bge_pa);
293 pcireg_t val;
294
295 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
296 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
297 return val;
298 }
299
300 void
301 bge_writemem_ind(sc, off, val)
302 struct bge_softc *sc;
303 int off, val;
304 {
305 struct pci_attach_args *pa = &(sc->bge_pa);
306
307 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
308 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
309 }
310
311 #ifdef notdef
312 u_int32_t
313 bge_readreg_ind(sc, off)
314 struct bge_softc *sc;
315 int off;
316 {
317 struct pci_attach_args *pa = &(sc->bge_pa);
318
319 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
320 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
321 }
322 #endif
323
324 void
325 bge_writereg_ind(sc, off, val)
326 struct bge_softc *sc;
327 int off, val;
328 {
329 struct pci_attach_args *pa = &(sc->bge_pa);
330
331 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
332 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
333 }
334
335 #ifdef notdef
336 u_int8_t
337 bge_vpd_readbyte(sc, addr)
338 struct bge_softc *sc;
339 int addr;
340 {
341 int i;
342 u_int32_t val;
343 struct pci_attach_args *pa = &(sc->bge_pa);
344
345 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
346 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
347 DELAY(10);
348 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
349 BGE_VPD_FLAG)
350 break;
351 }
352
353 if (i >= BGE_TIMEOUT * 10) {
354 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
355 return(0);
356 }
357
358 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
359
360 return((val >> ((addr % 4) * 8)) & 0xFF);
361 }
362
363 void
364 bge_vpd_read_res(sc, res, addr)
365 struct bge_softc *sc;
366 struct vpd_res *res;
367 int addr;
368 {
369 int i;
370 u_int8_t *ptr;
371
372 ptr = (u_int8_t *)res;
373 for (i = 0; i < sizeof(struct vpd_res); i++)
374 ptr[i] = bge_vpd_readbyte(sc, i + addr);
375 }
376
377 void
378 bge_vpd_read(sc)
379 struct bge_softc *sc;
380 {
381 int pos = 0, i;
382 struct vpd_res res;
383
384 if (sc->bge_vpd_prodname != NULL)
385 free(sc->bge_vpd_prodname, M_DEVBUF);
386 if (sc->bge_vpd_readonly != NULL)
387 free(sc->bge_vpd_readonly, M_DEVBUF);
388 sc->bge_vpd_prodname = NULL;
389 sc->bge_vpd_readonly = NULL;
390
391 bge_vpd_read_res(sc, &res, pos);
392
393 if (res.vr_id != VPD_RES_ID) {
394 printf("%s: bad VPD resource id: expected %x got %x\n",
395 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
396 return;
397 }
398
399 pos += sizeof(res);
400 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
401 if (sc->bge_vpd_prodname == NULL)
402 panic("bge_vpd_read");
403 for (i = 0; i < res.vr_len; i++)
404 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
405 sc->bge_vpd_prodname[i] = '\0';
406 pos += i;
407
408 bge_vpd_read_res(sc, &res, pos);
409
410 if (res.vr_id != VPD_RES_READ) {
411 printf("%s: bad VPD resource id: expected %x got %x\n",
412 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
413 return;
414 }
415
416 pos += sizeof(res);
417 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
418 if (sc->bge_vpd_readonly == NULL)
419 panic("bge_vpd_read");
420 for (i = 0; i < res.vr_len; i++)
421 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
422 }
423 #endif
424
425 /*
426 * Read a byte of data stored in the EEPROM at address 'addr.' The
427 * BCM570x supports both the traditional bitbang interface and an
428 * auto access interface for reading the EEPROM. We use the auto
429 * access method.
430 */
431 u_int8_t
432 bge_eeprom_getbyte(sc, addr, dest)
433 struct bge_softc *sc;
434 int addr;
435 u_int8_t *dest;
436 {
437 int i;
438 u_int32_t byte = 0;
439
440 /*
441 * Enable use of auto EEPROM access so we can avoid
442 * having to use the bitbang method.
443 */
444 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
445
446 /* Reset the EEPROM, load the clock period. */
447 CSR_WRITE_4(sc, BGE_EE_ADDR,
448 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
449 DELAY(20);
450
451 /* Issue the read EEPROM command. */
452 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
453
454 /* Wait for completion */
455 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
456 DELAY(10);
457 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
458 break;
459 }
460
461 if (i >= BGE_TIMEOUT * 10) {
462 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
463 return(0);
464 }
465
466 /* Get result. */
467 byte = CSR_READ_4(sc, BGE_EE_DATA);
468
469 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
470
471 return(0);
472 }
473
474 /*
475 * Read a sequence of bytes from the EEPROM.
476 */
477 int
478 bge_read_eeprom(sc, dest, off, cnt)
479 struct bge_softc *sc;
480 caddr_t dest;
481 int off;
482 int cnt;
483 {
484 int err = 0, i;
485 u_int8_t byte = 0;
486
487 for (i = 0; i < cnt; i++) {
488 err = bge_eeprom_getbyte(sc, off + i, &byte);
489 if (err)
490 break;
491 *(dest + i) = byte;
492 }
493
494 return(err ? 1 : 0);
495 }
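
/*
 * Usage sketch (illustrative only, hence "notdef"): pull a handful of
 * bytes out of the EEPROM into a local buffer and report failure if
 * any byte-level auto-access read timed out. The function name and
 * the offset are hypothetical.
 */
#ifdef notdef
static int
bge_eeprom_usage_example(struct bge_softc *sc)
{
	u_int8_t buf[6];

	if (bge_read_eeprom(sc, (caddr_t)buf, 0x7c /* hypothetical */,
	    sizeof(buf)))
		return (EIO);
	/* buf[] now holds six consecutive EEPROM bytes. */
	return (0);
}
#endif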
496
497 int
498 bge_miibus_readreg(dev, phy, reg)
499 struct device *dev;
500 int phy, reg;
501 {
502 struct bge_softc *sc = (struct bge_softc *)dev;
503 u_int32_t val;
504 u_int32_t saved_autopoll;
505 int i;
506
507 /*
508 * Several chips with builtin PHYs will incorrectly answer to
509 * PHY instances other than the builtin PHY at id 1.
510 */
511 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
512 return(0);
513
514 /* Reading with autopolling on may trigger PCI errors */
515 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
516 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
517 CSR_WRITE_4(sc, BGE_MI_MODE,
518 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
519 DELAY(40);
520 }
521
522 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
523 BGE_MIPHY(phy)|BGE_MIREG(reg));
524
525 for (i = 0; i < BGE_TIMEOUT; i++) {
526 val = CSR_READ_4(sc, BGE_MI_COMM);
527 if (!(val & BGE_MICOMM_BUSY))
528 break;
529 delay(10);
530 }
531
532 if (i == BGE_TIMEOUT) {
533 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
534 val = 0;
535 goto done;
536 }
537
538 val = CSR_READ_4(sc, BGE_MI_COMM);
539
540 done:
541 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
542 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
543 DELAY(40);
544 }
545
546 if (val & BGE_MICOMM_READFAIL)
547 return(0);
548
549 return(val & 0xFFFF);
550 }
551
552 void
553 bge_miibus_writereg(dev, phy, reg, val)
554 struct device *dev;
555 int phy, reg, val;
556 {
557 struct bge_softc *sc = (struct bge_softc *)dev;
558 u_int32_t saved_autopoll;
559 int i;
560
561 /* Touching the PHY while autopolling is on may trigger PCI errors */
562 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
563 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
564 delay(40);
565 CSR_WRITE_4(sc, BGE_MI_MODE,
566 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
567 delay(10); /* 40 usec is supposed to be adequate */
568 }
569
570 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
571 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
572
573 for (i = 0; i < BGE_TIMEOUT; i++) {
574 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
575 break;
576 delay(10);
577 }
578
579 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
580 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
581 delay(40);
582 }
583
584 if (i == BGE_TIMEOUT) {
585 printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
586 }
587 }
588
589 void
590 bge_miibus_statchg(dev)
591 struct device *dev;
592 {
593 struct bge_softc *sc = (struct bge_softc *)dev;
594 struct mii_data *mii = &sc->bge_mii;
595
596 /*
597 * Get flow control negotiation result.
598 */
599 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
600 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
601 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
602 mii->mii_media_active &= ~IFM_ETH_FMASK;
603 }
604
605 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
606 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
607 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
608 } else {
609 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
610 }
611
612 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
613 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
614 } else {
615 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
616 }
617
618 /*
619 * 802.3x flow control
620 */
621 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
622 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
623 } else {
624 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
625 }
626 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
627 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
628 } else {
629 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
630 }
631 }
632
633 /*
634 * Update rx threshold levels to values in a particular slot
635 * of the interrupt-mitigation table bge_rx_threshes.
636 */
637 void
638 bge_set_thresh(struct ifnet *ifp, int lvl)
639 {
640 struct bge_softc *sc = ifp->if_softc;
641 int s;
642
643 /* For now, just save the new Rx-intr thresholds and record
644 * that a threshold update is pending. Updating the hardware
645 * registers here (even at splhigh()) is observed to
646 * occasionally cause glitches where Rx-interrupts are not
647 * honoured for up to 10 seconds. jonathan@NetBSD.org, 2003-04-05
648 */
649 s = splnet();
650 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
651 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
652 sc->bge_pending_rxintr_change = 1;
653 splx(s);
654
655 return;
656 }
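
/*
 * For illustration (not part of this function): the deferred update
 * recorded above is applied elsewhere in the driver by rewriting the
 * host-coalescing registers once it is safe to do so. A minimal
 * sketch of that step, using only registers and softc fields that
 * appear in this file, might look like the following; the function
 * name is hypothetical.
 */
#ifdef notdef
static void
bge_apply_pending_thresh(struct bge_softc *sc)
{
	if (!sc->bge_pending_rxintr_change)
		return;
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	sc->bge_pending_rxintr_change = 0;
}
#endif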
657
658
659 /*
660 * Update Rx thresholds of all bge devices
661 */
662 void
663 bge_update_all_threshes(int lvl)
664 {
665 struct ifnet *ifp;
666 const char * const namebuf = "bge";
667 int namelen;
668
669 if (lvl < 0)
670 lvl = 0;
671 else if (lvl >= NBGE_RX_THRESH)
672 lvl = NBGE_RX_THRESH - 1;
673
674 namelen = strlen(namebuf);
675 /*
676 * Now search all the interfaces for this name/number
677 */
678 TAILQ_FOREACH(ifp, &ifnet, if_list) {
679 if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
680 continue;
681 /* We got a match: update if doing auto-threshold-tuning */
682 if (bge_auto_thresh)
683 bge_set_thresh(ifp, lvl);
684 }
685 }
686
687 /*
688 * Handle events that have triggered interrupts.
689 */
690 void
691 bge_handle_events(sc)
692 struct bge_softc *sc;
693 {
694
695 return;
696 }
697
698 /*
699 * Memory management for jumbo frames.
700 */
701
702 int
703 bge_alloc_jumbo_mem(sc)
704 struct bge_softc *sc;
705 {
706 caddr_t ptr, kva;
707 bus_dma_segment_t seg;
708 int i, rseg, state, error;
709 struct bge_jpool_entry *entry;
710
711 state = error = 0;
712
713 /* Grab a big chunk o' storage. */
714 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
715 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
716 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
717 return ENOBUFS;
718 }
719
720 state = 1;
721 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
722 BUS_DMA_NOWAIT)) {
723 printf("%s: can't map DMA buffers (%d bytes)\n",
724 sc->bge_dev.dv_xname, (int)BGE_JMEM);
725 error = ENOBUFS;
726 goto out;
727 }
728
729 state = 2;
730 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
731 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
732 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
733 error = ENOBUFS;
734 goto out;
735 }
736
737 state = 3;
738 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
739 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
740 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
741 error = ENOBUFS;
742 goto out;
743 }
744
745 state = 4;
746 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
747 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
748
749 SLIST_INIT(&sc->bge_jfree_listhead);
750 SLIST_INIT(&sc->bge_jinuse_listhead);
751
752 /*
753 * Now divide it up into 9K pieces and save the addresses
754 * in an array.
755 */
756 ptr = sc->bge_cdata.bge_jumbo_buf;
757 for (i = 0; i < BGE_JSLOTS; i++) {
758 sc->bge_cdata.bge_jslots[i] = ptr;
759 ptr += BGE_JLEN;
760 entry = malloc(sizeof(struct bge_jpool_entry),
761 M_DEVBUF, M_NOWAIT);
762 if (entry == NULL) {
763 printf("%s: no memory for jumbo buffer queue!\n",
764 sc->bge_dev.dv_xname);
765 error = ENOBUFS;
766 goto out;
767 }
768 entry->slot = i;
769 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
770 entry, jpool_entries);
771 }
772 out:
773 if (error != 0) {
774 switch (state) {
775 case 4:
776 bus_dmamap_unload(sc->bge_dmatag,
777 sc->bge_cdata.bge_rx_jumbo_map);
778 case 3:
779 bus_dmamap_destroy(sc->bge_dmatag,
780 sc->bge_cdata.bge_rx_jumbo_map);
781 case 2:
782 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
783 case 1:
784 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
785 break;
786 default:
787 break;
788 }
789 }
790
791 return error;
792 }
793
794 /*
795 * Allocate a jumbo buffer.
796 */
797 void *
798 bge_jalloc(sc)
799 struct bge_softc *sc;
800 {
801 struct bge_jpool_entry *entry;
802
803 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
804
805 if (entry == NULL) {
806 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
807 return(NULL);
808 }
809
810 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
811 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
812 return(sc->bge_cdata.bge_jslots[entry->slot]);
813 }
814
815 /*
816 * Release a jumbo buffer.
817 */
818 void
819 bge_jfree(m, buf, size, arg)
820 struct mbuf *m;
821 caddr_t buf;
822 size_t size;
823 void *arg;
824 {
825 struct bge_jpool_entry *entry;
826 struct bge_softc *sc;
827 int i, s;
828
829 /* Extract the softc struct pointer. */
830 sc = (struct bge_softc *)arg;
831
832 if (sc == NULL)
833 panic("bge_jfree: can't find softc pointer!");
834
835 /* calculate the slot this buffer belongs to */
836
837 i = ((caddr_t)buf
838 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
839
840 if ((i < 0) || (i >= BGE_JSLOTS))
841 panic("bge_jfree: asked to free buffer that we don't manage!");
842
843 s = splvm();
844 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
845 if (entry == NULL)
846 panic("bge_jfree: buffer not in use!");
847 entry->slot = i;
848 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
849 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
850
851 if (__predict_true(m != NULL))
852 pool_cache_put(&mbpool_cache, m);
853 splx(s);
854 }
855
856
857 /*
858 * Initialize a standard receive ring descriptor.
859 */
860 int
861 bge_newbuf_std(sc, i, m, dmamap)
862 struct bge_softc *sc;
863 int i;
864 struct mbuf *m;
865 bus_dmamap_t dmamap;
866 {
867 struct mbuf *m_new = NULL;
868 struct bge_rx_bd *r;
869 int error;
870
871 if (dmamap == NULL) {
872 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
873 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
874 if (error != 0)
875 return error;
876 }
877
878 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
879
880 if (m == NULL) {
881 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
882 if (m_new == NULL) {
883 return(ENOBUFS);
884 }
885
886 MCLGET(m_new, M_DONTWAIT);
887 if (!(m_new->m_flags & M_EXT)) {
888 m_freem(m_new);
889 return(ENOBUFS);
890 }
891 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
892 if (!sc->bge_rx_alignment_bug)
893 m_adj(m_new, ETHER_ALIGN);
894
895 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
896 BUS_DMA_READ|BUS_DMA_NOWAIT))
897 return(ENOBUFS);
898 } else {
899 m_new = m;
900 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
901 m_new->m_data = m_new->m_ext.ext_buf;
902 if (!sc->bge_rx_alignment_bug)
903 m_adj(m_new, ETHER_ALIGN);
904 }
905
906 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
907 r = &sc->bge_rdata->bge_rx_std_ring[i];
908 bge_set_hostaddr(&r->bge_addr,
909 dmamap->dm_segs[0].ds_addr);
910 r->bge_flags = BGE_RXBDFLAG_END;
911 r->bge_len = m_new->m_len;
912 r->bge_idx = i;
913
914 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
915 offsetof(struct bge_ring_data, bge_rx_std_ring) +
916 i * sizeof (struct bge_rx_bd),
917 sizeof (struct bge_rx_bd),
918 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
919
920 return(0);
921 }
922
923 /*
924 * Initialize a jumbo receive ring descriptor. This allocates
925 * a jumbo buffer from the pool managed internally by the driver.
926 */
927 int
928 bge_newbuf_jumbo(sc, i, m)
929 struct bge_softc *sc;
930 int i;
931 struct mbuf *m;
932 {
933 struct mbuf *m_new = NULL;
934 struct bge_rx_bd *r;
935
936 if (m == NULL) {
937 caddr_t buf = NULL;
938
939 /* Allocate the mbuf. */
940 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
941 if (m_new == NULL) {
942 return(ENOBUFS);
943 }
944
945 /* Allocate the jumbo buffer */
946 buf = bge_jalloc(sc);
947 if (buf == NULL) {
948 m_freem(m_new);
949 printf("%s: jumbo allocation failed "
950 "-- packet dropped!\n", sc->bge_dev.dv_xname);
951 return(ENOBUFS);
952 }
953
954 /* Attach the buffer to the mbuf. */
955 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
956 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
957 bge_jfree, sc);
958 m_new->m_flags |= M_EXT_RW;
959 } else {
960 m_new = m;
961 m_new->m_data = m_new->m_ext.ext_buf;
962 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
963 }
964
965 if (!sc->bge_rx_alignment_bug)
966 m_adj(m_new, ETHER_ALIGN);
967 /* Set up the descriptor. */
968 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
969 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
970 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
971 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
972 r->bge_len = m_new->m_len;
973 r->bge_idx = i;
974
975 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
976 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
977 i * sizeof (struct bge_rx_bd),
978 sizeof (struct bge_rx_bd),
979 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
980
981 return(0);
982 }
983
984 /*
985 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
986 * that's 1MB of memory, which is a lot. For now, we fill only the first
987 * 256 ring entries and hope that our CPU is fast enough to keep up with
988 * the NIC.
989 */
990 int
991 bge_init_rx_ring_std(sc)
992 struct bge_softc *sc;
993 {
994 int i;
995
996 if (sc->bge_flags & BGE_RXRING_VALID)
997 return 0;
998
999 for (i = 0; i < BGE_SSLOTS; i++) {
1000 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1001 return(ENOBUFS);
1002 }
1003
1004 sc->bge_std = i - 1;
1005 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1006
1007 sc->bge_flags |= BGE_RXRING_VALID;
1008
1009 return(0);
1010 }
1011
1012 void
1013 bge_free_rx_ring_std(sc)
1014 struct bge_softc *sc;
1015 {
1016 int i;
1017
1018 if (!(sc->bge_flags & BGE_RXRING_VALID))
1019 return;
1020
1021 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1022 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1023 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1024 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1025 bus_dmamap_destroy(sc->bge_dmatag,
1026 sc->bge_cdata.bge_rx_std_map[i]);
1027 }
1028 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1029 sizeof(struct bge_rx_bd));
1030 }
1031
1032 sc->bge_flags &= ~BGE_RXRING_VALID;
1033 }
1034
1035 int
1036 bge_init_rx_ring_jumbo(sc)
1037 struct bge_softc *sc;
1038 {
1039 int i;
1040 volatile struct bge_rcb *rcb;
1041
1042 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1043 return 0;
1044
1045 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1046 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1047 return(ENOBUFS);
1048 }
1049
1050 sc->bge_jumbo = i - 1;
1051 sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1052
1053 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1054 rcb->bge_maxlen_flags = 0;
1055 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1056
1057 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1058
1059 return(0);
1060 }
1061
1062 void
1063 bge_free_rx_ring_jumbo(sc)
1064 struct bge_softc *sc;
1065 {
1066 int i;
1067
1068 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1069 return;
1070
1071 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1072 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1073 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1074 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1075 }
1076 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1077 sizeof(struct bge_rx_bd));
1078 }
1079
1080 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1081 }
1082
1083 void
1084 bge_free_tx_ring(sc)
1085 struct bge_softc *sc;
1086 {
1087 int i, freed;
1088 struct txdmamap_pool_entry *dma;
1089
1090 if (!(sc->bge_flags & BGE_TXRING_VALID))
1091 return;
1092
1093 freed = 0;
1094
1095 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1096 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1097 freed++;
1098 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1099 sc->bge_cdata.bge_tx_chain[i] = NULL;
1100 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1101 link);
1102 sc->txdma[i] = 0;
1103 }
1104 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1105 sizeof(struct bge_tx_bd));
1106 }
1107
1108 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1109 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1110 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1111 free(dma, M_DEVBUF);
1112 }
1113
1114 sc->bge_flags &= ~BGE_TXRING_VALID;
1115 }
1116
1117 int
1118 bge_init_tx_ring(sc)
1119 struct bge_softc *sc;
1120 {
1121 int i;
1122 bus_dmamap_t dmamap;
1123 struct txdmamap_pool_entry *dma;
1124
1125 if (sc->bge_flags & BGE_TXRING_VALID)
1126 return 0;
1127
1128 sc->bge_txcnt = 0;
1129 sc->bge_tx_saved_considx = 0;
1130 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1131 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1132 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1133
1134 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1135 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1136 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1137
1138 SLIST_INIT(&sc->txdma_list);
1139 for (i = 0; i < BGE_RSLOTS; i++) {
1140 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
1141 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
1142 &dmamap))
1143 return(ENOBUFS);
1144 if (dmamap == NULL)
1145 panic("dmamap NULL in bge_init_tx_ring");
1146 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1147 if (dma == NULL) {
1148 printf("%s: can't alloc txdmamap_pool_entry\n",
1149 sc->bge_dev.dv_xname);
1150 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1151 return (ENOMEM);
1152 }
1153 dma->dmamap = dmamap;
1154 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1155 }
1156
1157 sc->bge_flags |= BGE_TXRING_VALID;
1158
1159 return(0);
1160 }
1161
1162 void
1163 bge_setmulti(sc)
1164 struct bge_softc *sc;
1165 {
1166 struct ethercom *ac = &sc->ethercom;
1167 struct ifnet *ifp = &ac->ec_if;
1168 struct ether_multi *enm;
1169 struct ether_multistep step;
1170 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1171 u_int32_t h;
1172 int i;
1173
1174 if (ifp->if_flags & IFF_PROMISC)
1175 goto allmulti;
1176
1177 /* Now program new ones. */
1178 ETHER_FIRST_MULTI(step, ac, enm);
1179 while (enm != NULL) {
1180 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1181 /*
1182 * We must listen to a range of multicast addresses.
1183 * For now, just accept all multicasts, rather than
1184 * trying to set only those filter bits needed to match
1185 * the range. (At this time, the only use of address
1186 * ranges is for IP multicast routing, for which the
1187 * range is big enough to require all bits set.)
1188 */
1189 goto allmulti;
1190 }
1191
1192 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1193
1194 /* Just want the 7 least-significant bits. */
1195 h &= 0x7f;
1196
1197 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1198 ETHER_NEXT_MULTI(step, enm);
1199 }
1200
1201 ifp->if_flags &= ~IFF_ALLMULTI;
1202 goto setit;
1203
1204 allmulti:
1205 ifp->if_flags |= IFF_ALLMULTI;
1206 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1207
1208 setit:
1209 for (i = 0; i < 4; i++)
1210 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1211 }
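
/*
 * Worked example (hypothetical CRC value): if the low 7 bits of
 * ether_crc32_le() for some address were 0x5b, then
 * (0x5b & 0x60) >> 5 == 2 and (0x5b & 0x1f) == 27, so that address
 * sets bit 27 of hashes[2], which the loop at the end of
 * bge_setmulti() writes to the third hash register (BGE_MAR0 + 8).
 */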
1212
1213 const int bge_swapbits[] = {
1214 0,
1215 BGE_MODECTL_BYTESWAP_DATA,
1216 BGE_MODECTL_WORDSWAP_DATA,
1217 BGE_MODECTL_BYTESWAP_NONFRAME,
1218 BGE_MODECTL_WORDSWAP_NONFRAME,
1219
1220 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
1221 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1222 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1223
1224 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1225 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1226
1227 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1228
1229 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1230 BGE_MODECTL_BYTESWAP_NONFRAME,
1231 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1232 BGE_MODECTL_WORDSWAP_NONFRAME,
1233 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1234 BGE_MODECTL_WORDSWAP_NONFRAME,
1235 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1236 BGE_MODECTL_WORDSWAP_NONFRAME,
1237
1238 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1239 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1240 };
1241
1242 int bge_swapindex = 0;
1243
1244 /*
1245 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1246 * self-test results.
1247 */
1248 int
1249 bge_chipinit(sc)
1250 struct bge_softc *sc;
1251 {
1252 u_int32_t cachesize;
1253 int i;
1254 u_int32_t dma_rw_ctl;
1255 struct pci_attach_args *pa = &(sc->bge_pa);
1256
1257
1258 /* Set endianness before we access any non-PCI registers. */
1259 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1260 BGE_INIT);
1261
1262 /* Set power state to D0. */
1263 bge_setpowerstate(sc, 0);
1264
1265 /*
1266 * Check the 'ROM failed' bit on the RX CPU to see if
1267 * self-tests passed.
1268 */
1269 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1270 printf("%s: RX CPU self-diagnostics failed!\n",
1271 sc->bge_dev.dv_xname);
1272 return(ENODEV);
1273 }
1274
1275 /* Clear the MAC control register */
1276 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1277
1278 /*
1279 * Clear the MAC statistics block in the NIC's
1280 * internal memory.
1281 */
1282 for (i = BGE_STATS_BLOCK;
1283 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1284 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1285
1286 for (i = BGE_STATUS_BLOCK;
1287 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1288 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1289
1290 /* Set up the PCI DMA control register. */
1291 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) &
1292 BGE_PCISTATE_PCI_BUSMODE) {
1293 /* Conventional PCI bus */
1294 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname));
1295 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1296 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1297 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
1298 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1299 dma_rw_ctl |= 0x0F;
1300 }
1301 } else {
1302 DPRINTFN(4, ("(%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname));
1303 /* PCI-X bus */
1304 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1305 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1306 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1307 (0x0F);
1308 /*
1309 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1310 * for hardware bugs, which means we should also clear
1311 * the low-order MINDMA bits. In addition, the 5704
1312 * uses a different encoding of read/write watermarks.
1313 */
1314 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1315 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1316 /* should be 0x1f0000 */
1317 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1318 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1319 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1320 }
1321 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
1322 dma_rw_ctl &= 0xfffffff0;
1323 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1324 }
1325 }
1326
1327 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1328
1329 /*
1330 * Set up general mode register.
1331 */
1332 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1333 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1334 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1335
1336 /* Get cache line size. */
1337 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
1338
1339 /*
1340 * Avoid violating PCI spec on certain chip revs.
1341 */
1342 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
1343 PCIM_CMD_MWIEN) {
1344 switch(cachesize) {
1345 case 1:
1346 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1347 BGE_PCI_WRITE_BNDRY_16BYTES);
1348 break;
1349 case 2:
1350 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1351 BGE_PCI_WRITE_BNDRY_32BYTES);
1352 break;
1353 case 4:
1354 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1355 BGE_PCI_WRITE_BNDRY_64BYTES);
1356 break;
1357 case 8:
1358 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1359 BGE_PCI_WRITE_BNDRY_128BYTES);
1360 break;
1361 case 16:
1362 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1363 BGE_PCI_WRITE_BNDRY_256BYTES);
1364 break;
1365 case 32:
1366 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1367 BGE_PCI_WRITE_BNDRY_512BYTES);
1368 break;
1369 case 64:
1370 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1371 BGE_PCI_WRITE_BNDRY_1024BYTES);
1372 break;
1373 default:
1374 /* Disable PCI memory write and invalidate. */
1375 #if 0
1376 if (bootverbose)
1377 printf("%s: cache line size %d not "
1378 "supported; disabling PCI MWI\n",
1379 sc->bge_dev.dv_xname, cachesize);
1380 #endif
1381 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
1382 PCIM_CMD_MWIEN);
1383 break;
1384 }
1385 }
1386
1387 /*
1388 * Disable memory write invalidate. Apparently it is not supported
1389 * properly by these devices.
1390 */
1391 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
1392
1393
1394 #ifdef __brokenalpha__
1395 /*
1396 * Must ensure that we do not cross an 8K (bytes) boundary
1397 * for DMA reads. Our highest limit is 1K bytes. This is a
1398 * restriction on some ALPHA platforms with early revision
1399 * 21174 PCI chipsets, such as the AlphaPC 164lx
1400 */
1401 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1402 #endif
1403
1404 /* Set the timer prescaler (always 66MHz) */
1405 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1406
1407 return(0);
1408 }
1409
1410 int
1411 bge_blockinit(sc)
1412 struct bge_softc *sc;
1413 {
1414 volatile struct bge_rcb *rcb;
1415 bus_size_t rcb_addr;
1416 int i;
1417 struct ifnet *ifp = &sc->ethercom.ec_if;
1418 bge_hostaddr taddr;
1419
1420 /*
1421 * Initialize the memory window pointer register so that
1422 * we can access the first 32K of internal NIC RAM. This will
1423 * allow us to set up the TX send ring RCBs and the RX return
1424 * ring RCBs, plus other things which live in NIC memory.
1425 */
1426
1427 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
1428 BGE_PCI_MEMWIN_BASEADDR, 0);
1429
1430 /* Configure mbuf memory pool */
1431 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1432 if (sc->bge_extram) {
1433 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1434 BGE_EXT_SSRAM);
1435 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1436 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1437 else
1438 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1439 } else {
1440 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1441 BGE_BUFFPOOL_1);
1442 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1443 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1444 else
1445 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1446 }
1447
1448 /* Configure DMA resource pool */
1449 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1450 BGE_DMA_DESCRIPTORS);
1451 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1452 }
1453
1454 /* Configure mbuf pool watermarks */
1455 #ifdef ORIG_WPAUL_VALUES
1456 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1457 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1458 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1459 #else
1460 /* New Broadcom docs strongly recommend these: */
1461 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1462 if (ifp->if_mtu > ETHER_MAX_LEN) {
1463 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1464 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1465 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1466 } else {
1467 /* Values from Linux driver... */
1468 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
1469 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
1470 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
1471 }
1472 } else {
1473 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1474 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1475 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1476 }
1477 #endif
1478
1479 /* Configure DMA resource watermarks */
1480 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1481 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1482
1483 /* Enable buffer manager */
1484 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1485 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1486 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1487
1488 /* Poll for buffer manager start indication */
1489 for (i = 0; i < BGE_TIMEOUT; i++) {
1490 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1491 break;
1492 DELAY(10);
1493 }
1494
1495 if (i == BGE_TIMEOUT) {
1496 printf("%s: buffer manager failed to start\n",
1497 sc->bge_dev.dv_xname);
1498 return(ENXIO);
1499 }
1500 }
1501
1502 /* Enable flow-through queues */
1503 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1504 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1505
1506 /* Wait until queue initialization is complete */
1507 for (i = 0; i < BGE_TIMEOUT; i++) {
1508 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1509 break;
1510 DELAY(10);
1511 }
1512
1513 if (i == BGE_TIMEOUT) {
1514 printf("%s: flow-through queue init failed\n",
1515 sc->bge_dev.dv_xname);
1516 return(ENXIO);
1517 }
1518
1519 /* Initialize the standard RX ring control block */
1520 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1521 bge_set_hostaddr(&rcb->bge_hostaddr,
1522 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1523 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1524 rcb->bge_maxlen_flags =
1525 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1526 } else {
1527 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1528 }
1529 if (sc->bge_extram)
1530 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1531 else
1532 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1533 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1534 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1535 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1536 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1537
1538 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1539 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1540 } else {
1541 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1542 }
1543
1544 /*
1545 * Initialize the jumbo RX ring control block
1546 * We set the 'ring disabled' bit in the flags
1547 * field until we're actually ready to start
1548 * using this ring (i.e. once we set the MTU
1549 * high enough to require it).
1550 */
1551 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1552 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1553 bge_set_hostaddr(&rcb->bge_hostaddr,
1554 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1555 rcb->bge_maxlen_flags =
1556 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1557 BGE_RCB_FLAG_RING_DISABLED);
1558 if (sc->bge_extram)
1559 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1560 else
1561 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1562
1563 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1564 rcb->bge_hostaddr.bge_addr_hi);
1565 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1566 rcb->bge_hostaddr.bge_addr_lo);
1567 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1568 rcb->bge_maxlen_flags);
1569 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1570
1571 /* Set up dummy disabled mini ring RCB */
1572 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1573 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1574 BGE_RCB_FLAG_RING_DISABLED);
1575 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1576 rcb->bge_maxlen_flags);
1577
1578 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1579 offsetof(struct bge_ring_data, bge_info),
1580 sizeof (struct bge_gib),
1581 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1582 }
1583
1584 /*
1585 * Set the BD ring replenish thresholds. The recommended
1586 * values are 1/8th the number of descriptors allocated to
1587 * each ring.
1588 */
1589 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1590 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1591
1592 /*
1593 * Disable all unused send rings by setting the 'ring disabled'
1594 * bit in the flags field of all the TX send ring control blocks.
1595 * These are located in NIC memory.
1596 */
1597 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1598 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1599 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1600 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1601 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1602 rcb_addr += sizeof(struct bge_rcb);
1603 }
1604
1605 /* Configure TX RCB 0 (we use only the first ring) */
1606 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1607 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1608 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1609 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1610 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1611 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1612 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1613 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1614 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1615 }
1616
1617 /* Disable all unused RX return rings */
1618 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1619 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1620 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1621 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1622 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1623 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1624 BGE_RCB_FLAG_RING_DISABLED));
1625 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1626 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1627 (i * (sizeof(u_int64_t))), 0);
1628 rcb_addr += sizeof(struct bge_rcb);
1629 }
1630
1631 /* Initialize RX ring indexes */
1632 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1633 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1634 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1635
1636 /*
1637 * Set up RX return ring 0
1638 * Note that the NIC address for RX return rings is 0x00000000.
1639 * The return rings live entirely within the host, so the
1640 * nicaddr field in the RCB isn't used.
1641 */
1642 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1643 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1644 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1645 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1646 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1647 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1648 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1649
1650 /* Set random backoff seed for TX */
1651 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1652 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
1653 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
1654 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
1655 BGE_TX_BACKOFF_SEED_MASK);
1656
1657 /* Set inter-packet gap */
1658 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1659
1660 /*
1661 * Specify which ring to use for packets that don't match
1662 * any RX rules.
1663 */
1664 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1665
1666 /*
1667 * Configure number of RX lists. One interrupt distribution
1668 * list, sixteen active lists, one bad frames class.
1669 */
1670 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1671
1672 /* Initialize RX list placement stats mask. */
1673 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1674 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1675
1676 /* Disable host coalescing until we get it set up */
1677 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1678
1679 /* Poll to make sure it's shut down. */
1680 for (i = 0; i < BGE_TIMEOUT; i++) {
1681 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1682 break;
1683 DELAY(10);
1684 }
1685
1686 if (i == BGE_TIMEOUT) {
1687 printf("%s: host coalescing engine failed to idle\n",
1688 sc->bge_dev.dv_xname);
1689 return(ENXIO);
1690 }
1691
1692 /* Set up host coalescing defaults */
1693 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1694 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1695 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1696 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1697 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1698 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1699 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1700 }
1701 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1702 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1703
1704 /* Set up address of statistics block */
1705 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1706 bge_set_hostaddr(&taddr,
1707 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1708 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1709 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1710 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
1711 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1712 }
1713
1714 /* Set up address of status block */
1715 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1716 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1717 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1718 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1719 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1720 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1721
1722 /* Turn on host coalescing state machine */
1723 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1724
1725 /* Turn on RX BD completion state machine and enable attentions */
1726 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1727 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1728
1729 /* Turn on RX list placement state machine */
1730 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1731
1732 /* Turn on RX list selector state machine. */
1733 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1734 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1735 }
1736
1737 /* Turn on DMA, clear stats */
1738 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1739 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1740 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1741 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1742 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1743
1744 /* Set misc. local control, enable interrupts on attentions */
1745 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
1746
1747 #ifdef notdef
1748 /* Assert GPIO pins for PHY reset */
1749 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1750 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1751 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1752 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1753 #endif
1754
1755 #if defined(not_quite_yet)
1756 /* Linux driver enables GPIO pin #1 on 5700s */
1757 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
1758 sc->bge_local_ctrl_reg |=
1759 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
1760 }
1761 #endif
1762 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1763
1764 /* Turn on DMA completion state machine */
1765 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1766 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1767 }
1768
1769 /* Turn on write DMA state machine */
1770 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1771 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1772
1773 /* Turn on read DMA state machine */
1774 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1775 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1776
1777 /* Turn on RX data completion state machine */
1778 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1779
1780 /* Turn on RX BD initiator state machine */
1781 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1782
1783 /* Turn on RX data and RX BD initiator state machine */
1784 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1785
1786 /* Turn on Mbuf cluster free state machine */
1787 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1788 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1789 }
1790
1791 /* Turn on send BD completion state machine */
1792 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1793
1794 /* Turn on send data completion state machine */
1795 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1796
1797 /* Turn on send data initiator state machine */
1798 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1799
1800 /* Turn on send BD initiator state machine */
1801 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1802
1803 /* Turn on send BD selector state machine */
1804 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1805
1806 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1807 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1808 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1809
1810 /* ack/clear link change events */
1811 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1812 BGE_MACSTAT_CFG_CHANGED);
1813 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1814
1815 /* Enable PHY auto polling (for MII/GMII only) */
1816 if (sc->bge_tbi) {
1817 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1818 } else {
1819 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1820 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1821 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1822 BGE_EVTENB_MI_INTERRUPT);
1823 }
1824
1825 /* Enable link state change attentions. */
1826 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1827
1828 return(0);
1829 }
1830
1831 static const struct bge_revision {
1832 uint32_t br_chipid;
1833 uint32_t br_quirks;
1834 const char *br_name;
1835 } bge_revisions[] = {
1836 { BGE_CHIPID_BCM5700_A0,
1837 BGE_QUIRK_LINK_STATE_BROKEN,
1838 "BCM5700 A0" },
1839
1840 { BGE_CHIPID_BCM5700_A1,
1841 BGE_QUIRK_LINK_STATE_BROKEN,
1842 "BCM5700 A1" },
1843
1844 { BGE_CHIPID_BCM5700_B0,
1845 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
1846 "BCM5700 B0" },
1847
1848 { BGE_CHIPID_BCM5700_B1,
1849 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1850 "BCM5700 B1" },
1851
1852 { BGE_CHIPID_BCM5700_B2,
1853 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1854 "BCM5700 B2" },
1855
1856 /* This is treated like a BCM5700 Bx */
1857 { BGE_CHIPID_BCM5700_ALTIMA,
1858 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1859 "BCM5700 Altima" },
1860
1861 { BGE_CHIPID_BCM5700_C0,
1862 0,
1863 "BCM5700 C0" },
1864
1865 { BGE_CHIPID_BCM5701_A0,
1866 0, /*XXX really, just not known */
1867 "BCM5701 A0" },
1868
1869 { BGE_CHIPID_BCM5701_B0,
1870 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1871 "BCM5701 B0" },
1872
1873 { BGE_CHIPID_BCM5701_B2,
1874 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1875 "BCM5701 B2" },
1876
1877 { BGE_CHIPID_BCM5701_B5,
1878 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1879 "BCM5701 B5" },
1880
1881 { BGE_CHIPID_BCM5703_A0,
1882 0,
1883 "BCM5703 A0" },
1884
1885 { BGE_CHIPID_BCM5703_A1,
1886 0,
1887 "BCM5703 A1" },
1888
1889 { BGE_CHIPID_BCM5703_A2,
1890 BGE_QUIRK_ONLY_PHY_1,
1891 "BCM5703 A2" },
1892
1893 { BGE_CHIPID_BCM5703_A3,
1894 BGE_QUIRK_ONLY_PHY_1,
1895 "BCM5703 A3" },
1896
1897 { BGE_CHIPID_BCM5704_A0,
1898 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1899 "BCM5704 A0" },
1900
1901 { BGE_CHIPID_BCM5704_A1,
1902 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1903 "BCM5704 A1" },
1904
1905 { BGE_CHIPID_BCM5704_A2,
1906 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1907 "BCM5704 A2" },
1908
1909 { BGE_CHIPID_BCM5704_A3,
1910 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1911 "BCM5704 A3" },
1912
1913 { BGE_CHIPID_BCM5705_A0,
1914 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1915 "BCM5705 A0" },
1916
1917 { BGE_CHIPID_BCM5705_A1,
1918 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1919 "BCM5705 A1" },
1920
1921 { BGE_CHIPID_BCM5705_A2,
1922 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1923 "BCM5705 A2" },
1924
1925 { BGE_CHIPID_BCM5705_A3,
1926 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1927 "BCM5705 A3" },
1928
1929 { 0, 0, NULL }
1930 };
1931
1932 /*
1933 * Some defaults for major revisions, so that newer steppings
1934 * that we don't know about have a shot at working.
1935 */
1936 static const struct bge_revision bge_majorrevs[] = {
1937 { BGE_ASICREV_BCM5700,
1938 BGE_QUIRK_LINK_STATE_BROKEN,
1939 "unknown BCM5700" },
1940
1941 { BGE_ASICREV_BCM5701,
1942 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1943 "unknown BCM5701" },
1944
1945 { BGE_ASICREV_BCM5703,
1946 0,
1947 "unknown BCM5703" },
1948
1949 { BGE_ASICREV_BCM5704,
1950 BGE_QUIRK_ONLY_PHY_1,
1951 "unknown BCM5704" },
1952
1953 { BGE_ASICREV_BCM5705,
1954 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1955 "unknown BCM5705" },
1956
1957 { 0,
1958 0,
1959 NULL }
1960 };
1961
1962
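/*
 * Map a chip id to its quirks and printable name: look for an exact
 * match in bge_revisions first, then fall back to the per-ASIC
 * major-revision defaults in bge_majorrevs.
 */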
1963 static const struct bge_revision *
1964 bge_lookup_rev(uint32_t chipid)
1965 {
1966 const struct bge_revision *br;
1967
1968 for (br = bge_revisions; br->br_name != NULL; br++) {
1969 if (br->br_chipid == chipid)
1970 return (br);
1971 }
1972
1973 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1974 if (br->br_chipid == BGE_ASICREV(chipid))
1975 return (br);
1976 }
1977
1978 return (NULL);
1979 }
1980
1981 static const struct bge_product {
1982 pci_vendor_id_t bp_vendor;
1983 pci_product_id_t bp_product;
1984 const char *bp_name;
1985 } bge_products[] = {
1986 /*
1987 * The BCM5700 documentation seems to indicate that the hardware
1988 * still has the Alteon vendor ID burned into it, though it
1989 * should always be overridden by the value in the EEPROM. We'll
1990 * check for it anyway.
1991 */
1992 { PCI_VENDOR_ALTEON,
1993 PCI_PRODUCT_ALTEON_BCM5700,
1994 "Broadcom BCM5700 Gigabit Ethernet",
1995 },
1996 { PCI_VENDOR_ALTEON,
1997 PCI_PRODUCT_ALTEON_BCM5701,
1998 "Broadcom BCM5701 Gigabit Ethernet",
1999 },
2000
2001 { PCI_VENDOR_ALTIMA,
2002 PCI_PRODUCT_ALTIMA_AC1000,
2003 "Altima AC1000 Gigabit Ethernet",
2004 },
2005 { PCI_VENDOR_ALTIMA,
2006 PCI_PRODUCT_ALTIMA_AC1001,
2007 "Altima AC1001 Gigabit Ethernet",
2008 },
2009 { PCI_VENDOR_ALTIMA,
2010 PCI_PRODUCT_ALTIMA_AC9100,
2011 "Altima AC9100 Gigabit Ethernet",
2012 },
2013
2014 { PCI_VENDOR_BROADCOM,
2015 PCI_PRODUCT_BROADCOM_BCM5700,
2016 "Broadcom BCM5700 Gigabit Ethernet",
2017 },
2018 { PCI_VENDOR_BROADCOM,
2019 PCI_PRODUCT_BROADCOM_BCM5701,
2020 "Broadcom BCM5701 Gigabit Ethernet",
2021 },
2022 { PCI_VENDOR_BROADCOM,
2023 PCI_PRODUCT_BROADCOM_BCM5702,
2024 "Broadcom BCM5702 Gigabit Ethernet",
2025 },
2026 { PCI_VENDOR_BROADCOM,
2027 PCI_PRODUCT_BROADCOM_BCM5702X,
2028 "Broadcom BCM5702X Gigabit Ethernet" },
2029
2030 { PCI_VENDOR_BROADCOM,
2031 PCI_PRODUCT_BROADCOM_BCM5703,
2032 "Broadcom BCM5703 Gigabit Ethernet",
2033 },
2034 { PCI_VENDOR_BROADCOM,
2035 PCI_PRODUCT_BROADCOM_BCM5703X,
2036 "Broadcom BCM5703X Gigabit Ethernet",
2037 },
2038 { PCI_VENDOR_BROADCOM,
2039 PCI_PRODUCT_BROADCOM_BCM5703A3,
2040 "Broadcom BCM5703A3 Gigabit Ethernet",
2041 },
2042
2043 { PCI_VENDOR_BROADCOM,
2044 PCI_PRODUCT_BROADCOM_BCM5704C,
2045 "Broadcom BCM5704C Dual Gigabit Ethernet",
2046 },
2047 { PCI_VENDOR_BROADCOM,
2048 PCI_PRODUCT_BROADCOM_BCM5704S,
2049 "Broadcom BCM5704S Dual Gigabit Ethernet",
2050 },
2051
2052 { PCI_VENDOR_BROADCOM,
2053 PCI_PRODUCT_BROADCOM_BCM5705,
2054 "Broadcom BCM5705 Gigabit Ethernet",
2055 },
2056 { PCI_VENDOR_BROADCOM,
2057 PCI_PRODUCT_BROADCOM_BCM5705_ALT,
2058 "Broadcom BCM5705 Gigabit Ethernet",
2059 },
2060 { PCI_VENDOR_BROADCOM,
2061 PCI_PRODUCT_BROADCOM_BCM5705M,
2062 "Broadcom BCM5705M Gigabit Ethernet",
2063 },
2064
2065 { PCI_VENDOR_BROADCOM,
2066 PCI_PRODUCT_BROADCOM_BCM5782,
2067 "Broadcom BCM5782 Gigabit Ethernet",
2068 },
2069 { PCI_VENDOR_BROADCOM,
2070 PCI_PRODUCT_BROADCOM_BCM5788,
2071 "Broadcom BCM5788 Gigabit Ethernet",
2072 },
2073
2074 { PCI_VENDOR_BROADCOM,
2075 PCI_PRODUCT_BROADCOM_BCM5901,
2076 "Broadcom BCM5901 Fast Ethernet",
2077 },
2078 { PCI_VENDOR_BROADCOM,
2079 PCI_PRODUCT_BROADCOM_BCM5901A2,
2080 "Broadcom BCM5901A2 Fast Ethernet",
2081 },
2082
2083 { PCI_VENDOR_SCHNEIDERKOCH,
2084 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2085 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2086 },
2087
2088 { PCI_VENDOR_3COM,
2089 PCI_PRODUCT_3COM_3C996,
2090 "3Com 3c996 Gigabit Ethernet",
2091 },
2092
2093 { 0,
2094 0,
2095 NULL },
2096 };
2097
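/*
 * Find the bge_products entry matching a device's PCI vendor and
 * product ID, or return NULL if the device is not one we support.
 */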
2098 static const struct bge_product *
2099 bge_lookup(const struct pci_attach_args *pa)
2100 {
2101 const struct bge_product *bp;
2102
2103 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2104 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2105 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2106 return (bp);
2107 }
2108
2109 return (NULL);
2110 }
2111
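/*
 * Set the chip power state. Only the D0 path is sketched in (and is
 * still under NOTYET); states D1-D3 are reported as unsupported.
 */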
2112 int
2113 bge_setpowerstate(sc, powerlevel)
2114 struct bge_softc *sc;
2115 int powerlevel;
2116 {
2117 #ifdef NOTYET
2118 u_int32_t pm_ctl = 0;
2119
2120 /* XXX FIXME: make sure indirect accesses enabled? */
2121 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2122 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2123 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2124
2125 /* clear the PME_assert bit and power state bits, enable PME */
2126 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2127 pm_ctl &= ~PCIM_PSTAT_DMASK;
2128 pm_ctl |= (1 << 8);
2129
2130 if (powerlevel == 0) {
2131 pm_ctl |= PCIM_PSTAT_D0;
2132 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2133 pm_ctl, 2);
2134 DELAY(10000);
2135 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2136 DELAY(10000);
2137
2138 #ifdef NOTYET
2139 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2140 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2141 #endif
2142 DELAY(40); DELAY(40); DELAY(40);
2143 DELAY(10000); /* above not quite adequate on 5700 */
2144 return 0;
2145 }
2146
2147
2148 /*
2149 * Entering ACPI power states D1-D3 is achieved by wiggling
2150 	 * GMII GPIO pins. The example code assumes all hardware vendors
2151 	 * followed Broadcom's sample PCB layout. Until we verify that
2152 * for all supported OEM cards, states D1-D3 are unsupported.
2153 */
2154 printf("%s: power state %d unimplemented; check GPIO pins\n",
2155 sc->bge_dev.dv_xname, powerlevel);
2156 #endif
2157 return EOPNOTSUPP;
2158 }
2159
2160
2161 /*
2162 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2163 * against our list and return its name if we find a match. Note
2164 * that since the Broadcom controller contains VPD support, we
2165 * can get the device name string from the controller itself instead
2166 * of the compiled-in string. This is a little slow, but it guarantees
2167 * we'll always announce the right product name.
2168 */
2169 int
2170 bge_probe(parent, match, aux)
2171 struct device *parent;
2172 struct cfdata *match;
2173 void *aux;
2174 {
2175 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2176
2177 if (bge_lookup(pa) != NULL)
2178 return (1);
2179
2180 return (0);
2181 }
2182
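/*
 * Attach routine: map the device registers, establish the interrupt,
 * force the chip into D0, reset and initialize it, read the station
 * address, look up ASIC quirks, allocate DMA memory for the rings and
 * attach the ifnet, MII and media glue.
 */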
2183 void
2184 bge_attach(parent, self, aux)
2185 struct device *parent, *self;
2186 void *aux;
2187 {
2188 struct bge_softc *sc = (struct bge_softc *)self;
2189 struct pci_attach_args *pa = aux;
2190 const struct bge_product *bp;
2191 const struct bge_revision *br;
2192 pci_chipset_tag_t pc = pa->pa_pc;
2193 pci_intr_handle_t ih;
2194 const char *intrstr = NULL;
2195 bus_dma_segment_t seg;
2196 int rseg;
2197 u_int32_t hwcfg = 0;
2198 u_int32_t mac_addr = 0;
2199 u_int32_t command;
2200 struct ifnet *ifp;
2201 caddr_t kva;
2202 u_char eaddr[ETHER_ADDR_LEN];
2203 pcireg_t memtype;
2204 bus_addr_t memaddr;
2205 bus_size_t memsize;
2206 u_int32_t pm_ctl;
2207
2208 bp = bge_lookup(pa);
2209 KASSERT(bp != NULL);
2210
2211 sc->bge_pa = *pa;
2212
2213 aprint_naive(": Ethernet controller\n");
2214 aprint_normal(": %s\n", bp->bp_name);
2215
2216 /*
2217 * Map control/status registers.
2218 */
2219 DPRINTFN(5, ("Map control/status regs\n"));
2220 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2221 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2222 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
2223 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2224
2225 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2226 aprint_error("%s: failed to enable memory mapping!\n",
2227 sc->bge_dev.dv_xname);
2228 return;
2229 }
2230
2231 DPRINTFN(5, ("pci_mem_find\n"));
2232 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2233 switch (memtype) {
2234 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2235 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2236 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2237 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2238 &memaddr, &memsize) == 0)
2239 break;
2240 default:
2241 aprint_error("%s: can't find mem space\n",
2242 sc->bge_dev.dv_xname);
2243 return;
2244 }
2245
2246 DPRINTFN(5, ("pci_intr_map\n"));
2247 if (pci_intr_map(pa, &ih)) {
2248 aprint_error("%s: couldn't map interrupt\n",
2249 sc->bge_dev.dv_xname);
2250 return;
2251 }
2252
2253 DPRINTFN(5, ("pci_intr_string\n"));
2254 intrstr = pci_intr_string(pc, ih);
2255
2256 DPRINTFN(5, ("pci_intr_establish\n"));
2257 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2258
2259 if (sc->bge_intrhand == NULL) {
2260 aprint_error("%s: couldn't establish interrupt",
2261 sc->bge_dev.dv_xname);
2262 if (intrstr != NULL)
2263 aprint_normal(" at %s", intrstr);
2264 aprint_normal("\n");
2265 return;
2266 }
2267 aprint_normal("%s: interrupting at %s\n",
2268 sc->bge_dev.dv_xname, intrstr);
2269
2270 /*
2271 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2272 * can clobber the chip's PCI config-space power control registers,
2273 * leaving the card in D3 powersave state.
2274 * We do not have memory-mapped registers in this state,
2275 * so force device into D0 state before starting initialization.
2276 */
2277 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2278 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2279 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2280 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2281 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2282
2283 /* Try to reset the chip. */
2284 DPRINTFN(5, ("bge_reset\n"));
2285 bge_reset(sc);
2286
2287 if (bge_chipinit(sc)) {
2288 aprint_error("%s: chip initialization failed\n",
2289 sc->bge_dev.dv_xname);
2290 bge_release_resources(sc);
2291 return;
2292 }
2293
2294 /*
2295 * Get station address from the EEPROM.
2296 */
2297 mac_addr = bge_readmem_ind(sc, 0x0c14);
2298 if ((mac_addr >> 16) == 0x484b) {
2299 eaddr[0] = (u_char)(mac_addr >> 8);
2300 eaddr[1] = (u_char)(mac_addr >> 0);
2301 mac_addr = bge_readmem_ind(sc, 0x0c18);
2302 eaddr[2] = (u_char)(mac_addr >> 24);
2303 eaddr[3] = (u_char)(mac_addr >> 16);
2304 eaddr[4] = (u_char)(mac_addr >> 8);
2305 eaddr[5] = (u_char)(mac_addr >> 0);
2306 } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
2307 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2308 aprint_error("%s: failed to read station address\n",
2309 sc->bge_dev.dv_xname);
2310 bge_release_resources(sc);
2311 return;
2312 }
2313
2314 /*
2315 * Save ASIC rev. Look up any quirks associated with this
2316 * ASIC.
2317 */
2318 sc->bge_chipid =
2319 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
2320 BGE_PCIMISCCTL_ASICREV;
2321 br = bge_lookup_rev(sc->bge_chipid);
2322
2323 aprint_normal("%s: ", sc->bge_dev.dv_xname);
2324
2325 if (br == NULL) {
2326 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
2327 sc->bge_quirks = 0;
2328 } else {
2329 aprint_normal("ASIC %s (0x%04x)",
2330 br->br_name, sc->bge_chipid >> 16);
2331 sc->bge_quirks |= br->br_quirks;
2332 }
2333 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2334
2335 /* Allocate the general information block and ring buffers. */
2336 if (pci_dma64_available(pa))
2337 sc->bge_dmatag = pa->pa_dmat64;
2338 else
2339 sc->bge_dmatag = pa->pa_dmat;
2340 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2341 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2342 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2343 aprint_error("%s: can't alloc rx buffers\n",
2344 sc->bge_dev.dv_xname);
2345 return;
2346 }
2347 DPRINTFN(5, ("bus_dmamem_map\n"));
2348 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2349 sizeof(struct bge_ring_data), &kva,
2350 BUS_DMA_NOWAIT)) {
2351 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
2352 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
2353 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2354 return;
2355 }
2356 DPRINTFN(5, ("bus_dmamem_create\n"));
2357 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2358 sizeof(struct bge_ring_data), 0,
2359 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2360 aprint_error("%s: can't create DMA map\n",
2361 sc->bge_dev.dv_xname);
2362 bus_dmamem_unmap(sc->bge_dmatag, kva,
2363 sizeof(struct bge_ring_data));
2364 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2365 return;
2366 }
2367 DPRINTFN(5, ("bus_dmamem_load\n"));
2368 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2369 sizeof(struct bge_ring_data), NULL,
2370 BUS_DMA_NOWAIT)) {
2371 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2372 bus_dmamem_unmap(sc->bge_dmatag, kva,
2373 sizeof(struct bge_ring_data));
2374 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2375 return;
2376 }
2377
2378 DPRINTFN(5, ("bzero\n"));
2379 sc->bge_rdata = (struct bge_ring_data *)kva;
2380
2381 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2382
2383 /* Try to allocate memory for jumbo buffers. */
2384 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2385 if (bge_alloc_jumbo_mem(sc)) {
2386 aprint_error("%s: jumbo buffer allocation failed\n",
2387 sc->bge_dev.dv_xname);
2388 } else
2389 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2390 }
2391
2392 /* Set default tuneable values. */
2393 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2394 sc->bge_rx_coal_ticks = 150;
2395 sc->bge_rx_max_coal_bds = 64;
2396 #ifdef ORIG_WPAUL_VALUES
2397 sc->bge_tx_coal_ticks = 150;
2398 sc->bge_tx_max_coal_bds = 128;
2399 #else
2400 sc->bge_tx_coal_ticks = 300;
2401 sc->bge_tx_max_coal_bds = 400;
2402 #endif
2403
2404 /* Set up ifnet structure */
2405 ifp = &sc->ethercom.ec_if;
2406 ifp->if_softc = sc;
2407 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2408 ifp->if_ioctl = bge_ioctl;
2409 ifp->if_start = bge_start;
2410 ifp->if_init = bge_init;
2411 ifp->if_watchdog = bge_watchdog;
2412 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2413 IFQ_SET_READY(&ifp->if_snd);
2414 DPRINTFN(5, ("bcopy\n"));
2415 strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
2416
2417 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2418 sc->ethercom.ec_if.if_capabilities |=
2419 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2420 sc->ethercom.ec_capabilities |=
2421 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2422
2423 /*
2424 * Do MII setup.
2425 */
2426 DPRINTFN(5, ("mii setup\n"));
2427 sc->bge_mii.mii_ifp = ifp;
2428 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2429 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2430 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2431
2432 /*
2433 * Figure out what sort of media we have by checking the
2434 * hardware config word in the first 32k of NIC internal memory,
2435 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2436 * cards, this value appears to be unset. If that's the
2437 * case, we have to rely on identifying the NIC by its PCI
2438 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2439 */
2440 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2441 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2442 } else {
2443 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2444 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2445 hwcfg = be32toh(hwcfg);
2446 }
2447 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2448 sc->bge_tbi = 1;
2449
2450 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2451 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
2452 SK_SUBSYSID_9D41)
2453 sc->bge_tbi = 1;
2454
2455 if (sc->bge_tbi) {
2456 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2457 bge_ifmedia_sts);
2458 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2459 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2460 0, NULL);
2461 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2462 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2463 } else {
2464 /*
2465 * Do transceiver setup.
2466 */
2467 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2468 bge_ifmedia_sts);
2469 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2470 MII_PHY_ANY, MII_OFFSET_ANY,
2471 MIIF_FORCEANEG|MIIF_DOPAUSE);
2472
2473 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2474 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2475 ifmedia_add(&sc->bge_mii.mii_media,
2476 IFM_ETHER|IFM_MANUAL, 0, NULL);
2477 ifmedia_set(&sc->bge_mii.mii_media,
2478 IFM_ETHER|IFM_MANUAL);
2479 } else
2480 ifmedia_set(&sc->bge_mii.mii_media,
2481 IFM_ETHER|IFM_AUTO);
2482 }
2483
2484 /*
2485 * When using the BCM5701 in PCI-X mode, data corruption has
2486 * been observed in the first few bytes of some received packets.
2487 * Aligning the packet buffer in memory eliminates the corruption.
2488 * Unfortunately, this misaligns the packet payloads. On platforms
2489 * which do not support unaligned accesses, we will realign the
2490 * payloads by copying the received packets.
2491 */
2492 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2493 /* If in PCI-X mode, work around the alignment bug. */
2494 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2495 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2496 BGE_PCISTATE_PCI_BUSSPEED)
2497 sc->bge_rx_alignment_bug = 1;
2498 }
2499
2500 /*
2501 * Call MI attach routine.
2502 */
2503 DPRINTFN(5, ("if_attach\n"));
2504 if_attach(ifp);
2505 DPRINTFN(5, ("ether_ifattach\n"));
2506 ether_ifattach(ifp, eaddr);
2507 #ifdef BGE_EVENT_COUNTERS
2508 /*
2509 * Attach event counters.
2510 */
2511 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
2512 NULL, sc->bge_dev.dv_xname, "intr");
2513 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
2514 NULL, sc->bge_dev.dv_xname, "tx_xoff");
2515 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
2516 NULL, sc->bge_dev.dv_xname, "tx_xon");
2517 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
2518 NULL, sc->bge_dev.dv_xname, "rx_xoff");
2519 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
2520 NULL, sc->bge_dev.dv_xname, "rx_xon");
2521 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
2522 NULL, sc->bge_dev.dv_xname, "rx_macctl");
2523 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
2524 NULL, sc->bge_dev.dv_xname, "xoffentered");
2525 #endif /* BGE_EVENT_COUNTERS */
2526 DPRINTFN(5, ("callout_init\n"));
2527 callout_init(&sc->bge_timeout);
2528 }
2529
2530 void
2531 bge_release_resources(sc)
2532 struct bge_softc *sc;
2533 {
2534 if (sc->bge_vpd_prodname != NULL)
2535 free(sc->bge_vpd_prodname, M_DEVBUF);
2536
2537 if (sc->bge_vpd_readonly != NULL)
2538 free(sc->bge_vpd_readonly, M_DEVBUF);
2539 }
2540
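/*
 * Issue a full chip reset: save the important PCI config registers,
 * reset the core clocks, restore the saved config state, wait for the
 * bootcode handshake (one's complement of the magic number in the
 * general communications memory) and for PCISTATE to settle back to
 * its pre-reset value, then restore byte-swap and MAC mode settings.
 */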
2541 void
2542 bge_reset(sc)
2543 struct bge_softc *sc;
2544 {
2545 struct pci_attach_args *pa = &sc->bge_pa;
2546 u_int32_t cachesize, command, pcistate, new_pcistate;
2547 int i, val = 0;
2548
2549 /* Save some important PCI state. */
2550 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2551 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2552 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2553
2554 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2555 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2556 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2557
2558 /* Issue global reset */
2559 bge_writereg_ind(sc, BGE_MISC_CFG,
2560 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
2561
2562 DELAY(1000);
2563
2564 /* Reset some of the PCI state that got zapped by reset */
2565 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2566 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2567 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2568 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2569 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2570 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2571
2572 /* Enable memory arbiter. */
2573 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2574 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2575 }
2576
2577 /*
2578 * Prevent PXE restart: write a magic number to the
2579 * general communications memory at 0xB50.
2580 */
2581 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2582
2583 /*
2584 * Poll the value location we just wrote until
2585 * we see the 1's complement of the magic number.
2586 * This indicates that the firmware initialization
2587 * is complete.
2588 */
2589 for (i = 0; i < 750; i++) {
2590 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2591 if (val == ~BGE_MAGIC_NUMBER)
2592 break;
2593 DELAY(1000);
2594 }
2595
2596 if (i == 750) {
2597 printf("%s: firmware handshake timed out, val = %x\n",
2598 sc->bge_dev.dv_xname, val);
2599 return;
2600 }
2601
2602 /*
2603 * XXX Wait for the value of the PCISTATE register to
2604 * return to its original pre-reset state. This is a
2605 * fairly good indicator of reset completion. If we don't
2606 * wait for the reset to fully complete, trying to read
2607 * from the device's non-PCI registers may yield garbage
2608 * results.
2609 */
2610 for (i = 0; i < BGE_TIMEOUT; i++) {
2611 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2612 BGE_PCI_PCISTATE);
2613 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2614 (pcistate & ~BGE_PCISTATE_RESERVED))
2615 break;
2616 DELAY(10);
2617 }
2618 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2619 (pcistate & ~BGE_PCISTATE_RESERVED)) {
2620 printf("%s: pcistate failed to revert\n",
2621 sc->bge_dev.dv_xname);
2622 }
2623
2624 /* Enable memory arbiter. */
2625 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2626 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2627 }
2628
2629 /* Fix up byte swapping */
2630 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2631
2632 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2633
2634 DELAY(10000);
2635 }
2636
2637 /*
2638 * Frame reception handling. This is called if there's a frame
2639 * on the receive return list.
2640 *
2641 * Note: we have to be able to handle two possibilities here:
2642  * 1) the frame is from the jumbo receive ring
2643 * 2) the frame is from the standard receive ring
2644 */
2645
2646 void
2647 bge_rxeof(sc)
2648 struct bge_softc *sc;
2649 {
2650 struct ifnet *ifp;
2651 int stdcnt = 0, jumbocnt = 0;
2652 int have_tag = 0;
2653 u_int16_t vlan_tag = 0;
2654 bus_dmamap_t dmamap;
2655 bus_addr_t offset, toff;
2656 bus_size_t tlen;
2657 int tosync;
2658
2659 ifp = &sc->ethercom.ec_if;
2660
2661 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2662 offsetof(struct bge_ring_data, bge_status_block),
2663 sizeof (struct bge_status_block),
2664 BUS_DMASYNC_POSTREAD);
2665
2666 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2667 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2668 sc->bge_rx_saved_considx;
2669
2670 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2671
2672 if (tosync < 0) {
2673 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2674 sizeof (struct bge_rx_bd);
2675 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2676 toff, tlen, BUS_DMASYNC_POSTREAD);
2677 tosync = -tosync;
2678 }
2679
2680 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2681 offset, tosync * sizeof (struct bge_rx_bd),
2682 BUS_DMASYNC_POSTREAD);
2683
2684 while(sc->bge_rx_saved_considx !=
2685 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2686 struct bge_rx_bd *cur_rx;
2687 u_int32_t rxidx;
2688 struct mbuf *m = NULL;
2689
2690 cur_rx = &sc->bge_rdata->
2691 bge_rx_return_ring[sc->bge_rx_saved_considx];
2692
2693 rxidx = cur_rx->bge_idx;
2694 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2695
2696 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2697 have_tag = 1;
2698 vlan_tag = cur_rx->bge_vlan_tag;
2699 }
2700
2701 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2702 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2703 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2704 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2705 jumbocnt++;
2706 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2707 ifp->if_ierrors++;
2708 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2709 continue;
2710 }
2711 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2712 NULL)== ENOBUFS) {
2713 ifp->if_ierrors++;
2714 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2715 continue;
2716 }
2717 } else {
2718 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2719 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2720 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2721 stdcnt++;
2722 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2723 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2724 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2725 ifp->if_ierrors++;
2726 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2727 continue;
2728 }
2729 if (bge_newbuf_std(sc, sc->bge_std,
2730 NULL, dmamap) == ENOBUFS) {
2731 ifp->if_ierrors++;
2732 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2733 continue;
2734 }
2735 }
2736
2737 ifp->if_ipackets++;
2738 #ifndef __NO_STRICT_ALIGNMENT
2739 /*
2740 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
2741 * the Rx buffer has the layer-2 header unaligned.
2742 * If our CPU requires alignment, re-align by copying.
2743 */
2744 if (sc->bge_rx_alignment_bug) {
2745 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
2746 cur_rx->bge_len);
2747 m->m_data += ETHER_ALIGN;
2748 }
2749 #endif
2750
2751 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2752 m->m_pkthdr.rcvif = ifp;
2753
2754 #if NBPFILTER > 0
2755 /*
2756 * Handle BPF listeners. Let the BPF user see the packet.
2757 */
2758 if (ifp->if_bpf)
2759 bpf_mtap(ifp->if_bpf, m);
2760 #endif
2761
2762 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
2763
2764 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2765 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2766 /*
2767 * Rx transport checksum-offload may also
2768 * have bugs with packets which, when transmitted,
2769 * were `runts' requiring padding.
2770 */
2771 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2772 		    (/* (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || */
2773 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
2774 m->m_pkthdr.csum_data =
2775 cur_rx->bge_tcp_udp_csum;
2776 m->m_pkthdr.csum_flags |=
2777 (M_CSUM_TCPv4|M_CSUM_UDPv4|
2778 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
2779 }
2780
2781 /*
2782 * If we received a packet with a vlan tag, pass it
2783 * to vlan_input() instead of ether_input().
2784 */
2785 if (have_tag) {
2786 struct m_tag *mtag;
2787
2788 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2789 M_NOWAIT);
2790 if (mtag != NULL) {
2791 *(u_int *)(mtag + 1) = vlan_tag;
2792 m_tag_prepend(m, mtag);
2793 have_tag = vlan_tag = 0;
2794 } else {
2795 printf("%s: no mbuf for tag\n", ifp->if_xname);
2796 m_freem(m);
2797 have_tag = vlan_tag = 0;
2798 continue;
2799 }
2800 }
2801 (*ifp->if_input)(ifp, m);
2802 }
2803
2804 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2805 if (stdcnt)
2806 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2807 if (jumbocnt)
2808 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2809 }
2810
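/*
 * Transmit completion handling: walk the TX ring from the saved
 * consumer index up to the index reported in the status block,
 * unloading DMA maps and freeing mbufs for frames the chip has sent.
 */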
2811 void
2812 bge_txeof(sc)
2813 struct bge_softc *sc;
2814 {
2815 struct bge_tx_bd *cur_tx = NULL;
2816 struct ifnet *ifp;
2817 struct txdmamap_pool_entry *dma;
2818 bus_addr_t offset, toff;
2819 bus_size_t tlen;
2820 int tosync;
2821 struct mbuf *m;
2822
2823 ifp = &sc->ethercom.ec_if;
2824
2825 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2826 offsetof(struct bge_ring_data, bge_status_block),
2827 sizeof (struct bge_status_block),
2828 BUS_DMASYNC_POSTREAD);
2829
2830 offset = offsetof(struct bge_ring_data, bge_tx_ring);
2831 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2832 sc->bge_tx_saved_considx;
2833
2834 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2835
2836 if (tosync < 0) {
2837 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2838 sizeof (struct bge_tx_bd);
2839 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2840 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2841 tosync = -tosync;
2842 }
2843
2844 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2845 offset, tosync * sizeof (struct bge_tx_bd),
2846 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2847
2848 /*
2849 * Go through our tx ring and free mbufs for those
2850 * frames that have been sent.
2851 */
2852 while (sc->bge_tx_saved_considx !=
2853 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2854 u_int32_t idx = 0;
2855
2856 idx = sc->bge_tx_saved_considx;
2857 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2858 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2859 ifp->if_opackets++;
2860 m = sc->bge_cdata.bge_tx_chain[idx];
2861 if (m != NULL) {
2862 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2863 dma = sc->txdma[idx];
2864 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2865 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2866 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2867 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2868 sc->txdma[idx] = NULL;
2869
2870 m_freem(m);
2871 }
2872 sc->bge_txcnt--;
2873 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2874 ifp->if_timer = 0;
2875 }
2876
2877 if (cur_tx != NULL)
2878 ifp->if_flags &= ~IFF_OACTIVE;
2879 }
2880
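/*
 * Interrupt handler: acknowledge the interrupt, process link state
 * changes, drain the RX return and TX completion rings, apply any
 * pending interrupt-coalescing changes, then re-enable interrupts
 * and restart transmission if packets are queued.
 */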
2881 int
2882 bge_intr(xsc)
2883 void *xsc;
2884 {
2885 struct bge_softc *sc;
2886 struct ifnet *ifp;
2887
2888 sc = xsc;
2889 ifp = &sc->ethercom.ec_if;
2890
2891 #ifdef notdef
2892 /* Avoid this for now -- checking this register is expensive. */
2893 /* Make sure this is really our interrupt. */
2894 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2895 return (0);
2896 #endif
2897 	/* Ack interrupt and stop others from occurring. */
2898 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2899
2900 BGE_EVCNT_INCR(sc->bge_ev_intr);
2901
2902 /*
2903 * Process link state changes.
2904 * Grrr. The link status word in the status block does
2905 * not work correctly on the BCM5700 rev AX and BX chips,
2906 	 * according to all available information. Hence, we have
2907 * to enable MII interrupts in order to properly obtain
2908 * async link changes. Unfortunately, this also means that
2909 * we have to read the MAC status register to detect link
2910 * changes, thereby adding an additional register access to
2911 * the interrupt handler.
2912 */
2913
2914 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2915 u_int32_t status;
2916
2917 status = CSR_READ_4(sc, BGE_MAC_STS);
2918 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2919 sc->bge_link = 0;
2920 callout_stop(&sc->bge_timeout);
2921 bge_tick(sc);
2922 /* Clear the interrupt */
2923 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2924 BGE_EVTENB_MI_INTERRUPT);
2925 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2926 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2927 BRGPHY_INTRS);
2928 }
2929 } else {
2930 if (sc->bge_rdata->bge_status_block.bge_status &
2931 BGE_STATFLAG_LINKSTATE_CHANGED) {
2932 sc->bge_link = 0;
2933 callout_stop(&sc->bge_timeout);
2934 bge_tick(sc);
2935 /* Clear the interrupt */
2936 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2937 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2938 BGE_MACSTAT_LINK_CHANGED);
2939 }
2940 }
2941
2942 if (ifp->if_flags & IFF_RUNNING) {
2943 /* Check RX return ring producer/consumer */
2944 bge_rxeof(sc);
2945
2946 /* Check TX ring producer/consumer */
2947 bge_txeof(sc);
2948 }
2949
2950 if (sc->bge_pending_rxintr_change) {
2951 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
2952 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
2953 uint32_t junk;
2954
2955 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
2956 DELAY(10);
2957 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
2958
2959 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
2960 DELAY(10);
2961 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
2962
2963 sc->bge_pending_rxintr_change = 0;
2964 }
2965 bge_handle_events(sc);
2966
2967 /* Re-enable interrupts. */
2968 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2969
2970 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2971 bge_start(ifp);
2972
2973 return (1);
2974 }
2975
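/*
 * One-second timer: update statistics and, until a link has been
 * established, poll the TBI or MII for link state.
 */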
2976 void
2977 bge_tick(xsc)
2978 void *xsc;
2979 {
2980 struct bge_softc *sc = xsc;
2981 struct mii_data *mii = &sc->bge_mii;
2982 struct ifmedia *ifm = NULL;
2983 struct ifnet *ifp = &sc->ethercom.ec_if;
2984 int s;
2985
2986 s = splnet();
2987
2988 bge_stats_update(sc);
2989 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2990 if (sc->bge_link) {
2991 splx(s);
2992 return;
2993 }
2994
2995 if (sc->bge_tbi) {
2996 ifm = &sc->bge_ifmedia;
2997 if (CSR_READ_4(sc, BGE_MAC_STS) &
2998 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2999 sc->bge_link++;
3000 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3001 if (!IFQ_IS_EMPTY(&ifp->if_snd))
3002 bge_start(ifp);
3003 }
3004 splx(s);
3005 return;
3006 }
3007
3008 mii_tick(mii);
3009
3010 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
3011 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
3012 sc->bge_link++;
3013 if (!IFQ_IS_EMPTY(&ifp->if_snd))
3014 bge_start(ifp);
3015 }
3016
3017 splx(s);
3018 }
3019
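/*
 * Fold the NIC statistics into the interface and event counters.
 * 5705-class chips keep them in MAC statistics registers; other chips
 * expose a statistics block through the memory window.
 */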
3020 void
3021 bge_stats_update(sc)
3022 struct bge_softc *sc;
3023 {
3024 struct ifnet *ifp = &sc->ethercom.ec_if;
3025 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3026 bus_size_t rstats = BGE_RX_STATS;
3027
3028 #define READ_RSTAT(sc, stats, stat) \
3029 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
3030
3031 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
3032 ifp->if_collisions +=
3033 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
3034 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
3035 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
3036 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
3037
3038 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff,
3039 READ_RSTAT(sc, rstats, outXoffSent));
3040 BGE_EVCNT_ADD(sc->bge_ev_tx_xon,
3041 READ_RSTAT(sc, rstats, outXonSent));
3042 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff,
3043 READ_RSTAT(sc, rstats, xoffPauseFramesReceived));
3044 BGE_EVCNT_ADD(sc->bge_ev_rx_xon,
3045 READ_RSTAT(sc, rstats, xonPauseFramesReceived));
3046 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl,
3047 READ_RSTAT(sc, rstats, macControlFramesReceived));
3048 BGE_EVCNT_ADD(sc->bge_ev_xoffentered,
3049 READ_RSTAT(sc, rstats, xoffStateEntered));
3050 return;
3051 }
3052
3053 #undef READ_RSTAT
3054 #define READ_STAT(sc, stats, stat) \
3055 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3056
3057 ifp->if_collisions +=
3058 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3059 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3060 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3061 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3062 ifp->if_collisions;
3063
3064 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3065 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3066 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3067 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3068 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3069 READ_STAT(sc, stats,
3070 xoffPauseFramesReceived.bge_addr_lo));
3071 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3072 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3073 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3074 READ_STAT(sc, stats,
3075 macControlFramesReceived.bge_addr_lo));
3076 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3077 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3078
3079 #undef READ_STAT
3080
3081 #ifdef notdef
3082 ifp->if_collisions +=
3083 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3084 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3085 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3086 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3087 ifp->if_collisions;
3088 #endif
3089 }
3090
3091 /*
3092 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3093 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3094 * but when such padded frames employ the bge IP/TCP checksum offload,
3095 * the hardware checksum assist gives incorrect results (possibly
3096 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3097 * If we pad such runts with zeros, the onboard checksum comes out correct.
3098 */
3099 static __inline int
3100 bge_cksum_pad(struct mbuf *pkt)
3101 {
3102 struct mbuf *last = NULL;
3103 int padlen;
3104
3105 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3106
3107 /* if there's only the packet-header and we can pad there, use it. */
3108 if (pkt->m_pkthdr.len == pkt->m_len &&
3109 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
3110 last = pkt;
3111 } else {
3112 /*
3113 * Walk packet chain to find last mbuf. We will either
3114 * pad there, or append a new mbuf and pad it
3115 * (thus perhaps avoiding the bcm5700 dma-min bug).
3116 */
3117 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3118 		(void) 0; /* do nothing */
3119 }
3120
3121 /* `last' now points to last in chain. */
3122 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
3123 (void) 0; /* we can pad here, in-place. */
3124 } else {
3125 /* Allocate new empty mbuf, pad it. Compact later. */
3126 struct mbuf *n;
3127 			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
3128 			n->m_len = 0;
3129 last->m_next = n;
3130 last = n;
3131 }
3132 }
3133
3134 #ifdef DEBUG
3135 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
3136 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
3137 #endif
3138 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3139 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3140 last->m_len += padlen;
3141 pkt->m_pkthdr.len += padlen;
3142 return 0;
3143 }
3144
3145 /*
3146 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3147 */
3148 static __inline int
3149 bge_compact_dma_runt(struct mbuf *pkt)
3150 {
3151 struct mbuf *m, *prev;
3152 int totlen, prevlen;
3153
3154 prev = NULL;
3155 totlen = 0;
3156 prevlen = -1;
3157
3158 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3159 		int mlen = m->m_len;
3160 		int shortfall = 8 - mlen;
3161
3162 totlen += mlen;
3163 if (mlen == 0) {
3164 continue;
3165 }
3166 if (mlen >= 8)
3167 continue;
3168
3169 		/* If we get here, the mbuf data is too small for the DMA engine.
3170 * Try to fix by shuffling data to prev or next in chain.
3171 * If that fails, do a compacting deep-copy of the whole chain.
3172 */
3173
3174 /* Internal frag. If fits in prev, copy it there. */
3175 if (prev && !M_READONLY(prev) &&
3176 M_TRAILINGSPACE(prev) >= m->m_len) {
3177 bcopy(m->m_data,
3178 prev->m_data+prev->m_len,
3179 mlen);
3180 prev->m_len += mlen;
3181 m->m_len = 0;
3182 /* XXX stitch chain */
3183 prev->m_next = m_free(m);
3184 m = prev;
3185 continue;
3186 }
3187 else if (m->m_next != NULL && !M_READONLY(m) &&
3188 M_TRAILINGSPACE(m) >= shortfall &&
3189 m->m_next->m_len >= (8 + shortfall)) {
3190 			/* m is writable and the next mbuf has enough data; pull it up. */
3191
3192 bcopy(m->m_next->m_data,
3193 m->m_data+m->m_len,
3194 shortfall);
3195 m->m_len += shortfall;
3196 m->m_next->m_len -= shortfall;
3197 m->m_next->m_data += shortfall;
3198 }
3199 else if (m->m_next == NULL || 1) {
3200 /* Got a runt at the very end of the packet.
3201 			 * Borrow data from the tail of the preceding mbuf and
3202 * update its length in-place. (The original data is still
3203 * valid, so we can do this even if prev is not writable.)
3204 */
3205
3206 /* if we'd make prev a runt, just move all of its data. */
3207 #ifdef DEBUG
3208 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3209 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3210 #endif
3211 if ((prev->m_len - shortfall) < 8)
3212 shortfall = prev->m_len;
3213
3214 #ifdef notyet /* just do the safe slow thing for now */
3215 if (!M_READONLY(m)) {
3216 			if (M_LEADINGSPACE(m) < shortfall) {
3217 				void *dat;
3218 				dat = (m->m_flags & M_PKTHDR) ?
3219 				    m->m_pktdat : m->m_dat;
3220 				memmove(dat, mtod(m, void *), m->m_len);
3221 				m->m_data = dat;
3222 }
3223 } else
3224 #endif /* just do the safe slow thing */
3225 {
3226 struct mbuf * n = NULL;
3227 int newprevlen = prev->m_len - shortfall;
3228
3229 MGET(n, M_NOWAIT, MT_DATA);
3230 if (n == NULL)
3231 return ENOBUFS;
3232 KASSERT(m->m_len + shortfall < MLEN
3233 /*,
3234 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3235
3236 /* first copy the data we're stealing from prev */
3237 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3238
3239 /* update prev->m_len accordingly */
3240 prev->m_len -= shortfall;
3241
3242 /* copy data from runt m */
3243 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3244
3245 /* n holds what we stole from prev, plus m */
3246 n->m_len = shortfall + m->m_len;
3247
3248 /* stitch n into chain and free m */
3249 n->m_next = m->m_next;
3250 prev->m_next = n;
3251 /* KASSERT(m->m_next == NULL); */
3252 m->m_next = NULL;
3253 m_free(m);
3254 m = n; /* for continuing loop */
3255 }
3256 }
3257 prevlen = m->m_len;
3258 }
3259 return 0;
3260 }
3261
3262 /*
3263 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3264 * pointers to descriptors.
3265 */
3266 int
3267 bge_encap(sc, m_head, txidx)
3268 struct bge_softc *sc;
3269 struct mbuf *m_head;
3270 u_int32_t *txidx;
3271 {
3272 struct bge_tx_bd *f = NULL;
3273 u_int32_t frag, cur, cnt = 0;
3274 u_int16_t csum_flags = 0;
3275 struct txdmamap_pool_entry *dma;
3276 bus_dmamap_t dmamap;
3277 int i = 0;
3278 struct m_tag *mtag;
3279
3280 cur = frag = *txidx;
3281
3282 if (m_head->m_pkthdr.csum_flags) {
3283 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3284 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3285 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3286 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3287 }
3288
3289 /*
3290 * If we were asked to do an outboard checksum, and the NIC
3291 * has the bug where it sometimes adds in the Ethernet padding,
3292 * explicitly pad with zeros so the cksum will be correct either way.
3293 * (For now, do this for all chip versions, until newer
3294 * are confirmed to not require the workaround.)
3295 */
3296 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3297 #ifdef notyet
3298 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3299 #endif
3300 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3301 goto check_dma_bug;
3302
3303 if (bge_cksum_pad(m_head) != 0)
3304 return ENOBUFS;
3305
3306 check_dma_bug:
3307 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
3308 goto doit;
3309 /*
3310 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3311 * less than eight bytes. If we encounter a teeny mbuf
3312 * at the end of a chain, we can pad. Otherwise, copy.
3313 */
3314 if (bge_compact_dma_runt(m_head) != 0)
3315 return ENOBUFS;
3316
3317 doit:
3318 dma = SLIST_FIRST(&sc->txdma_list);
3319 if (dma == NULL)
3320 return ENOBUFS;
3321 dmamap = dma->dmamap;
3322
3323 /*
3324 * Start packing the mbufs in this chain into
3325 * the fragment pointers. Stop when we run out
3326 * of fragments or hit the end of the mbuf chain.
3327 */
3328 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3329 BUS_DMA_NOWAIT))
3330 return(ENOBUFS);
3331
3332 mtag = sc->ethercom.ec_nvlans ?
3333 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3334
3335 for (i = 0; i < dmamap->dm_nsegs; i++) {
3336 f = &sc->bge_rdata->bge_tx_ring[frag];
3337 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3338 break;
3339 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3340 f->bge_len = dmamap->dm_segs[i].ds_len;
3341 f->bge_flags = csum_flags;
3342
3343 if (mtag != NULL) {
3344 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3345 f->bge_vlan_tag = *(u_int *)(mtag + 1);
3346 } else {
3347 f->bge_vlan_tag = 0;
3348 }
3349 /*
3350 * Sanity check: avoid coming within 16 descriptors
3351 * of the end of the ring.
3352 */
3353 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
3354 return(ENOBUFS);
3355 cur = frag;
3356 BGE_INC(frag, BGE_TX_RING_CNT);
3357 cnt++;
3358 }
3359
3360 if (i < dmamap->dm_nsegs)
3361 return ENOBUFS;
3362
3363 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3364 BUS_DMASYNC_PREWRITE);
3365
3366 if (frag == sc->bge_tx_saved_considx)
3367 return(ENOBUFS);
3368
3369 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3370 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3371 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3372 sc->txdma[cur] = dma;
3373 sc->bge_txcnt += cnt;
3374
3375 *txidx = frag;
3376
3377 return(0);
3378 }
3379
3380 /*
3381 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3382 * to the mbuf data regions directly in the transmit descriptors.
3383 */
3384 void
3385 bge_start(ifp)
3386 struct ifnet *ifp;
3387 {
3388 struct bge_softc *sc;
3389 struct mbuf *m_head = NULL;
3390 u_int32_t prodidx = 0;
3391 int pkts = 0;
3392
3393 sc = ifp->if_softc;
3394
3395 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3396 return;
3397
3398 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3399
3400 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3401 IFQ_POLL(&ifp->if_snd, m_head);
3402 if (m_head == NULL)
3403 break;
3404
3405 #if 0
3406 /*
3407 * XXX
3408 * safety overkill. If this is a fragmented packet chain
3409 * with delayed TCP/UDP checksums, then only encapsulate
3410 * it if we have enough descriptors to handle the entire
3411 * chain at once.
3412 * (paranoia -- may not actually be needed)
3413 */
3414 if (m_head->m_flags & M_FIRSTFRAG &&
3415 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3416 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3417 m_head->m_pkthdr.csum_data + 16) {
3418 ifp->if_flags |= IFF_OACTIVE;
3419 break;
3420 }
3421 }
3422 #endif
3423
3424 /*
3425 * Pack the data into the transmit ring. If we
3426 * don't have room, set the OACTIVE flag and wait
3427 * for the NIC to drain the ring.
3428 */
3429 if (bge_encap(sc, m_head, &prodidx)) {
3430 ifp->if_flags |= IFF_OACTIVE;
3431 break;
3432 }
3433
3434 /* now we are committed to transmit the packet */
3435 IFQ_DEQUEUE(&ifp->if_snd, m_head);
3436 pkts++;
3437
3438 #if NBPFILTER > 0
3439 /*
3440 * If there's a BPF listener, bounce a copy of this frame
3441 * to him.
3442 */
3443 if (ifp->if_bpf)
3444 bpf_mtap(ifp->if_bpf, m_head);
3445 #endif
3446 }
3447 if (pkts == 0)
3448 return;
3449
3450 /* Transmit */
3451 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3452 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
3453 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3454
3455 /*
3456 * Set a timeout in case the chip goes out to lunch.
3457 */
3458 ifp->if_timer = 5;
3459 }
3460
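/*
 * (Re)initialize the interface: reset the chip, program the MAC
 * address, MTU and receive filters, set up the RX/TX rings, enable
 * the transmitter, receiver and host interrupts, and start the
 * one-second tick callout.
 */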
3461 int
3462 bge_init(ifp)
3463 struct ifnet *ifp;
3464 {
3465 struct bge_softc *sc = ifp->if_softc;
3466 u_int16_t *m;
3467 int s, error;
3468
3469 s = splnet();
3470
3471 ifp = &sc->ethercom.ec_if;
3472
3473 /* Cancel pending I/O and flush buffers. */
3474 bge_stop(sc);
3475 bge_reset(sc);
3476 bge_chipinit(sc);
3477
3478 /*
3479 * Init the various state machines, ring
3480 * control blocks and firmware.
3481 */
3482 error = bge_blockinit(sc);
3483 if (error != 0) {
3484 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
3485 error);
3486 splx(s);
3487 return error;
3488 }
3489
3490 ifp = &sc->ethercom.ec_if;
3491
3492 /* Specify MTU. */
3493 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3494 ETHER_HDR_LEN + ETHER_CRC_LEN);
3495
3496 /* Load our MAC address. */
3497 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
3498 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3499 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3500
3501 /* Enable or disable promiscuous mode as needed. */
3502 if (ifp->if_flags & IFF_PROMISC) {
3503 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3504 } else {
3505 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3506 }
3507
3508 /* Program multicast filter. */
3509 bge_setmulti(sc);
3510
3511 /* Init RX ring. */
3512 bge_init_rx_ring_std(sc);
3513
3514 /* Init jumbo RX ring. */
3515 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3516 bge_init_rx_ring_jumbo(sc);
3517
3518 /* Init our RX return ring index */
3519 sc->bge_rx_saved_considx = 0;
3520
3521 /* Init TX ring. */
3522 bge_init_tx_ring(sc);
3523
3524 /* Turn on transmitter */
3525 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3526
3527 /* Turn on receiver */
3528 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3529
3530 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
3531
3532 /* Tell firmware we're alive. */
3533 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3534
3535 /* Enable host interrupts. */
3536 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3537 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3538 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3539
3540 bge_ifmedia_upd(ifp);
3541
3542 ifp->if_flags |= IFF_RUNNING;
3543 ifp->if_flags &= ~IFF_OACTIVE;
3544
3545 splx(s);
3546
3547 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3548
3549 return 0;
3550 }
3551
3552 /*
3553 * Set media options.
3554 */
3555 int
3556 bge_ifmedia_upd(ifp)
3557 struct ifnet *ifp;
3558 {
3559 struct bge_softc *sc = ifp->if_softc;
3560 struct mii_data *mii = &sc->bge_mii;
3561 struct ifmedia *ifm = &sc->bge_ifmedia;
3562
3563 /* If this is a 1000baseX NIC, enable the TBI port. */
3564 if (sc->bge_tbi) {
3565 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3566 return(EINVAL);
3567 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3568 case IFM_AUTO:
3569 break;
3570 case IFM_1000_SX:
3571 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3572 BGE_CLRBIT(sc, BGE_MAC_MODE,
3573 BGE_MACMODE_HALF_DUPLEX);
3574 } else {
3575 BGE_SETBIT(sc, BGE_MAC_MODE,
3576 BGE_MACMODE_HALF_DUPLEX);
3577 }
3578 break;
3579 default:
3580 return(EINVAL);
3581 }
3582 /* XXX 802.3x flow control for 1000BASE-SX */
3583 return(0);
3584 }
3585
3586 sc->bge_link = 0;
3587 mii_mediachg(mii);
3588
3589 return(0);
3590 }
3591
3592 /*
3593 * Report current media status.
3594 */
3595 void
3596 bge_ifmedia_sts(ifp, ifmr)
3597 struct ifnet *ifp;
3598 struct ifmediareq *ifmr;
3599 {
3600 struct bge_softc *sc = ifp->if_softc;
3601 struct mii_data *mii = &sc->bge_mii;
3602
3603 if (sc->bge_tbi) {
3604 ifmr->ifm_status = IFM_AVALID;
3605 ifmr->ifm_active = IFM_ETHER;
3606 if (CSR_READ_4(sc, BGE_MAC_STS) &
3607 BGE_MACSTAT_TBI_PCS_SYNCHED)
3608 ifmr->ifm_status |= IFM_ACTIVE;
3609 ifmr->ifm_active |= IFM_1000_SX;
3610 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3611 ifmr->ifm_active |= IFM_HDX;
3612 else
3613 ifmr->ifm_active |= IFM_FDX;
3614 return;
3615 }
3616
3617 mii_pollstat(mii);
3618 ifmr->ifm_status = mii->mii_media_status;
3619 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
3620 sc->bge_flowflags;
3621 }
3622
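/*
 * Handle interface ioctls. Flag changes that only toggle promiscuous
 * mode avoid a full re-init; media requests go to either the TBI
 * ifmedia or the MII layer.
 */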
3623 int
3624 bge_ioctl(ifp, command, data)
3625 struct ifnet *ifp;
3626 u_long command;
3627 caddr_t data;
3628 {
3629 struct bge_softc *sc = ifp->if_softc;
3630 struct ifreq *ifr = (struct ifreq *) data;
3631 int s, error = 0;
3632 struct mii_data *mii;
3633
3634 s = splnet();
3635
3636 switch(command) {
3637 case SIOCSIFFLAGS:
3638 if (ifp->if_flags & IFF_UP) {
3639 /*
3640 * If only the state of the PROMISC flag changed,
3641 * then just use the 'set promisc mode' command
3642 * instead of reinitializing the entire NIC. Doing
3643 * a full re-init means reloading the firmware and
3644 * waiting for it to start up, which may take a
3645 * second or two.
3646 */
3647 if (ifp->if_flags & IFF_RUNNING &&
3648 ifp->if_flags & IFF_PROMISC &&
3649 !(sc->bge_if_flags & IFF_PROMISC)) {
3650 BGE_SETBIT(sc, BGE_RX_MODE,
3651 BGE_RXMODE_RX_PROMISC);
3652 } else if (ifp->if_flags & IFF_RUNNING &&
3653 !(ifp->if_flags & IFF_PROMISC) &&
3654 sc->bge_if_flags & IFF_PROMISC) {
3655 BGE_CLRBIT(sc, BGE_RX_MODE,
3656 BGE_RXMODE_RX_PROMISC);
3657 } else
3658 bge_init(ifp);
3659 } else {
3660 if (ifp->if_flags & IFF_RUNNING) {
3661 bge_stop(sc);
3662 }
3663 }
3664 sc->bge_if_flags = ifp->if_flags;
3665 error = 0;
3666 break;
3667 case SIOCSIFMEDIA:
3668 /* XXX Flow control is not supported for 1000BASE-SX */
3669 if (sc->bge_tbi) {
3670 ifr->ifr_media &= ~IFM_ETH_FMASK;
3671 sc->bge_flowflags = 0;
3672 }
3673
3674 /* Flow control requires full-duplex mode. */
3675 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3676 (ifr->ifr_media & IFM_FDX) == 0) {
3677 ifr->ifr_media &= ~IFM_ETH_FMASK;
3678 }
3679 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3680 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3681 				/* We can do both TXPAUSE and RXPAUSE. */
3682 ifr->ifr_media |=
3683 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3684 }
3685 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3686 }
3687 /* FALLTHROUGH */
3688 case SIOCGIFMEDIA:
3689 if (sc->bge_tbi) {
3690 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3691 command);
3692 } else {
3693 mii = &sc->bge_mii;
3694 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3695 command);
3696 }
3697 break;
3698 default:
3699 error = ether_ioctl(ifp, command, data);
3700 if (error == ENETRESET) {
3701 bge_setmulti(sc);
3702 error = 0;
3703 }
3704 break;
3705 }
3706
3707 splx(s);
3708
3709 return(error);
3710 }
3711
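/*
 * Transmit watchdog: the chip has stopped making progress on the TX
 * ring, so report the timeout and re-initialize the interface.
 */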
3712 void
3713 bge_watchdog(ifp)
3714 struct ifnet *ifp;
3715 {
3716 struct bge_softc *sc;
3717
3718 sc = ifp->if_softc;
3719
3720 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3721
3722 ifp->if_flags &= ~IFF_RUNNING;
3723 bge_init(ifp);
3724
3725 ifp->if_oerrors++;
3726 }
3727
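/*
 * Clear the enable bit in a state machine's mode register and wait
 * (up to BGE_TIMEOUT iterations) for the block to report that it has
 * stopped.
 */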
3728 static void
3729 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
3730 {
3731 int i;
3732
3733 BGE_CLRBIT(sc, reg, bit);
3734
3735 for (i = 0; i < BGE_TIMEOUT; i++) {
3736 if ((CSR_READ_4(sc, reg) & bit) == 0)
3737 return;
3738 delay(100);
3739 }
3740
3741 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3742 sc->bge_dev.dv_xname, (u_long) reg, bit);
3743 }
3744
3745 /*
3746 * Stop the adapter and free any mbufs allocated to the
3747 * RX and TX lists.
3748 */
3749 void
3750 bge_stop(sc)
3751 struct bge_softc *sc;
3752 {
3753 struct ifnet *ifp = &sc->ethercom.ec_if;
3754
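	/* Cancel the periodic tick callout before touching the chip. */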
3755 callout_stop(&sc->bge_timeout);
3756
3757 /*
3758 * Disable all of the receiver blocks
3759 */
3760 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3761 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3762 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3763 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3764 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3765 }
3766 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3767 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3768 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3769
3770 /*
3771 * Disable all of the transmit blocks
3772 */
3773 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3774 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3775 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3776 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3777 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3778 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3779 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3780 }
3781 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3782
3783 /*
3784 * Shut down all of the memory managers and related
3785 * state machines.
3786 */
3787 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3788 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3789 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3790 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3791 }
3792
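	/*
	 * Pulse the flow-through queue (FTQ) reset register: writing all
	 * ones resets every internal FTQ, writing zero releases them again.
	 */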
3793 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3794 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3795
3796 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3797 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3798 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3799 }
3800
3801 /* Disable host interrupts. */
3802 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3803 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3804
3805 /*
3806 * Tell firmware we're shutting down.
3807 */
3808 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3809
3810 /* Free the RX lists. */
3811 bge_free_rx_ring_std(sc);
3812
3813 /* Free jumbo RX list. */
3814 bge_free_rx_ring_jumbo(sc);
3815
3816 /* Free TX buffers. */
3817 bge_free_tx_ring(sc);
3818
3819 /*
3820 * Isolate/power down the PHY.
3821 */
3822 if (!sc->bge_tbi)
3823 mii_down(&sc->bge_mii);
3824
3825 sc->bge_link = 0;
3826
3827 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3828
3829 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3830 }
3831
3832 /*
3833 * Stop all chip I/O so that the kernel's probe routines don't
3834 * get confused by errant DMAs when rebooting.
3835 */
3836 void
3837 bge_shutdown(xsc)
3838 void *xsc;
3839 {
3840 struct bge_softc *sc = (struct bge_softc *)xsc;
3841
3842 bge_stop(sc);
3843 bge_reset(sc);
3844 }
3845
3846
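/*
 * Verification handler for the writable hw.bge nodes.  sysctl_lookup()
 * copies the proposed value into a local variable, which is then
 * range-checked before being committed, so the Rx interrupt mitigation
 * level can only be set to a value in [0, NBGE_RX_THRESH).
 */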
3847 static int
3848 sysctl_bge_verify(SYSCTLFN_ARGS)
3849 {
3850 int error, t;
3851 struct sysctlnode node;
3852
3853 node = *rnode;
3854 t = *(int*)rnode->sysctl_data;
3855 node.sysctl_data = &t;
3856 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3857 if (error || newp == NULL)
3858 return (error);
3859
3860 #if 0
3861 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
3862 node.sysctl_num, rnode->sysctl_num));
3863 #endif
3864
3865 if (node.sysctl_num == bge_rxthresh_nodenum) {
3866 if (t < 0 || t >= NBGE_RX_THRESH)
3867 return (EINVAL);
3868 bge_update_all_threshes(t);
3869 } else
3870 return (EINVAL);
3871
3872 *(int*)rnode->sysctl_data = t;
3873
3874 return (0);
3875 }
3876
3877 /*
3878 * Set up sysctl(3) MIB, hw.bge.*.
3879 *
3880  * TBD: condition CTLFLAG_PERMANENT on whether this is an LKM or not
3881 */
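/*
 * Once this tree exists the mitigation level can be read or written at
 * run time.  A minimal, illustrative sketch (not part of the driver):
 *
 *	# sysctl hw.bge.rx_lvl
 *	# sysctl -w hw.bge.rx_lvl=2
 *
 * or from a C program via sysctlbyname(3):
 *
 *	int lvl = 2;
 *	if (sysctlbyname("hw.bge.rx_lvl", NULL, NULL, &lvl, sizeof(lvl)) == -1)
 *		err(1, "sysctlbyname");
 *
 * Values outside [0, NBGE_RX_THRESH) are rejected with EINVAL by
 * sysctl_bge_verify() above.
 */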
3882 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
3883 {
3884 int rc, bge_root_num;
3885 struct sysctlnode *node;
3886
3887 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
3888 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
3889 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
3890 goto err;
3891 }
3892
3893 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3894 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
3895 SYSCTL_DESCR("BGE interface controls"),
3896 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
3897 goto err;
3898 }
3899
3900 bge_root_num = node->sysctl_num;
3901
3902 /* BGE Rx interrupt mitigation level */
3903 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3904 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
3905 CTLTYPE_INT, "rx_lvl",
3906 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
3907 sysctl_bge_verify, 0,
3908 &bge_rx_thresh_lvl,
3909 0, CTL_HW, bge_root_num, CTL_CREATE,
3910 CTL_EOL)) != 0) {
3911 goto err;
3912 }
3913
3914 bge_rxthresh_nodenum = node->sysctl_num;
3915
3916 return;
3917
3918 err:
3919 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
3920 }
3921