1 /*	$NetBSD: if_bge.c,v 1.64 2004/03/27 01:17:49 jonathan Exp $	*/
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl (at) wasabisystems.com>
44 * Jason Thorpe <thorpej (at) wasabisystems.com>
45 * Jonathan Stone <jonathan (at) dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can only have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.64 2004/03/27 01:17:49 jonathan Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/device.h>
95 #include <sys/socket.h>
96 #include <sys/sysctl.h>
97
98 #include <net/if.h>
99 #include <net/if_dl.h>
100 #include <net/if_media.h>
101 #include <net/if_ether.h>
102
103 #ifdef INET
104 #include <netinet/in.h>
105 #include <netinet/in_systm.h>
106 #include <netinet/in_var.h>
107 #include <netinet/ip.h>
108 #endif
109
110 #if NBPFILTER > 0
111 #include <net/bpf.h>
112 #endif
113
114 #include <dev/pci/pcireg.h>
115 #include <dev/pci/pcivar.h>
116 #include <dev/pci/pcidevs.h>
117
118 #include <dev/mii/mii.h>
119 #include <dev/mii/miivar.h>
120 #include <dev/mii/miidevs.h>
121 #include <dev/mii/brgphyreg.h>
122
123 #include <dev/pci/if_bgereg.h>
124
125 #include <uvm/uvm_extern.h>
126
127 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
128
129
130 /*
131 * Tunable thresholds for rx-side bge interrupt mitigation.
132 */
133
134 /*
135 * The pairs of values below were obtained from empirical measurement
136 * on bcm5700 rev B2; they are designed to give roughly 1 receive
137 * interrupt for every N packets received, where N is, approximately,
138 * the second value (rx_max_bds) in each pair. The values are chosen
139 * such that moving from one pair to the succeeding pair was observed
140 * to roughly halve interrupt rate under sustained input packet load.
141 * The values were empirically chosen to avoid overflowing internal
142 * limits on the bcm5700: increasing rx_ticks much beyond 600
143 * results in internal wrapping and higher interrupt rates.
144 * The limit of 46 frames was chosen to match NFS workloads.
145 *
146 * These values also work well on bcm5701, bcm5704C, and (less
147 * tested) bcm5703. On other chipsets (including the Altima chip
148 * family), the larger values may overflow internal chip limits,
149 * leading to increasing interrupt rates rather than lower interrupt
150 * rates.
151 *
152 * Applications using heavy interrupt mitigation (interrupting every
153 * 32 or 46 frames) in both directions may need to increase the TCP
154 * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
155 * full link bandwidth, due to ACKs and window updates lingering
156 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
157 */
158 struct bge_load_rx_thresh {
159 int rx_ticks;
160 int rx_max_bds; }
161 bge_rx_threshes[] = {
162 { 32, 2 },
163 { 50, 4 },
164 { 100, 8 },
165 { 192, 16 },
166 { 416, 32 },
167 { 598, 46 }
168 };
169 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
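/*
 * For example, with the largest pair above the chip holds off the RX
 * interrupt until either 46 receive BDs have accumulated or 598
 * coalescing ticks have elapsed, whichever limit is reached first.
 */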
170
171 /* XXX patchable; should be sysctl'able */
172 static int bge_auto_thresh = 1;
173 static int bge_rx_thresh_lvl;
174
175 #ifdef __NetBSD__
176 static struct sysctlnode *bge_node_root;
177 static int bge_rxthresh_nodenum;
178 #endif /* __NetBSD__ */
179
180 int bge_probe(struct device *, struct cfdata *, void *);
181 void bge_attach(struct device *, struct device *, void *);
182 void bge_release_resources(struct bge_softc *);
183 void bge_txeof(struct bge_softc *);
184 void bge_rxeof(struct bge_softc *);
185
186 void bge_tick(void *);
187 void bge_stats_update(struct bge_softc *);
188 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
189 static __inline int bge_cksum_pad(struct mbuf *pkt);
190 static __inline int bge_compact_dma_runt(struct mbuf *pkt);
191
192 int bge_intr(void *);
193 void bge_start(struct ifnet *);
194 int bge_ioctl(struct ifnet *, u_long, caddr_t);
195 int bge_init(struct ifnet *);
196 void bge_stop(struct bge_softc *);
197 void bge_watchdog(struct ifnet *);
198 void bge_shutdown(void *);
199 int bge_ifmedia_upd(struct ifnet *);
200 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
201
202 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
203 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
204
205 void bge_setmulti(struct bge_softc *);
206
207 void bge_handle_events(struct bge_softc *);
208 int bge_alloc_jumbo_mem(struct bge_softc *);
209 void bge_free_jumbo_mem(struct bge_softc *);
210 void *bge_jalloc(struct bge_softc *);
211 void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
212 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
213 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
214 int bge_init_rx_ring_std(struct bge_softc *);
215 void bge_free_rx_ring_std(struct bge_softc *);
216 int bge_init_rx_ring_jumbo(struct bge_softc *);
217 void bge_free_rx_ring_jumbo(struct bge_softc *);
218 void bge_free_tx_ring(struct bge_softc *);
219 int bge_init_tx_ring(struct bge_softc *);
220
221 int bge_chipinit(struct bge_softc *);
222 int bge_blockinit(struct bge_softc *);
223 int bge_setpowerstate(struct bge_softc *, int);
224
225 #ifdef notdef
226 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
227 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
228 void bge_vpd_read(struct bge_softc *);
229 #endif
230
231 u_int32_t bge_readmem_ind(struct bge_softc *, int);
232 void bge_writemem_ind(struct bge_softc *, int, int);
233 #ifdef notdef
234 u_int32_t bge_readreg_ind(struct bge_softc *, int);
235 #endif
236 void bge_writereg_ind(struct bge_softc *, int, int);
237
238 int bge_miibus_readreg(struct device *, int, int);
239 void bge_miibus_writereg(struct device *, int, int, int);
240 void bge_miibus_statchg(struct device *);
241
242 void bge_reset(struct bge_softc *);
243
244 void bge_set_thresh(struct ifnet * /*ifp*/, int /*lvl*/);
245 void bge_update_all_threshes(int /*lvl*/);
246
247 void bge_dump_status(struct bge_softc *);
248 void bge_dump_rxbd(struct bge_rx_bd *);
249
250 #define BGE_DEBUG
251 #ifdef BGE_DEBUG
252 #define DPRINTF(x) if (bgedebug) printf x
253 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x
254 int bgedebug = 0;
255 #else
256 #define DPRINTF(x)
257 #define DPRINTFN(n,x)
258 #endif
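/*
 * Usage sketch for the debug macros above: the extra parentheses pass a
 * complete argument list through unchanged, e.g.
 *	DPRINTFN(4, ("%s: some event\n", sc->bge_dev.dv_xname));
 * which prints only when the patchable "bgedebug" variable is >= 4.
 */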
259
260 /* Various chip quirks. */
261 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001
262 #define BGE_QUIRK_CSUM_BROKEN 0x00000002
263 #define BGE_QUIRK_ONLY_PHY_1 0x00000004
264 #define BGE_QUIRK_5700_SMALLDMA 0x00000008
265 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010
266 #define BGE_QUIRK_PRODUCER_BUG 0x00000020
267 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040
268 #define BGE_QUIRK_5705_CORE 0x00000080
269 #define BGE_QUIRK_FEWER_MBUFS 0x00000100
270
271 /* following bugs are common to bcm5700 rev B, all flavours */
272 #define BGE_QUIRK_5700_COMMON \
273 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
274
275 CFATTACH_DECL(bge, sizeof(struct bge_softc),
276 bge_probe, bge_attach, NULL, NULL);
277
278 u_int32_t
279 bge_readmem_ind(sc, off)
280 struct bge_softc *sc;
281 int off;
282 {
283 struct pci_attach_args *pa = &(sc->bge_pa);
284 pcireg_t val;
285
286 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
287 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
288 return val;
289 }
290
291 void
292 bge_writemem_ind(sc, off, val)
293 struct bge_softc *sc;
294 int off, val;
295 {
296 struct pci_attach_args *pa = &(sc->bge_pa);
297
298 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
299 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
300 }
301
302 #ifdef notdef
303 u_int32_t
304 bge_readreg_ind(sc, off)
305 struct bge_softc *sc;
306 int off;
307 {
308 struct pci_attach_args *pa = &(sc->bge_pa);
309
310 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
311 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
312 }
313 #endif
314
315 void
316 bge_writereg_ind(sc, off, val)
317 struct bge_softc *sc;
318 int off, val;
319 {
320 struct pci_attach_args *pa = &(sc->bge_pa);
321
322 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
323 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
324 }
325
326 #ifdef notdef
327 u_int8_t
328 bge_vpd_readbyte(sc, addr)
329 struct bge_softc *sc;
330 int addr;
331 {
332 int i;
333 u_int32_t val;
334 struct pci_attach_args *pa = &(sc->bge_pa);
335
336 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
337 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
338 DELAY(10);
339 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
340 BGE_VPD_FLAG)
341 break;
342 }
343
344 	if (i == BGE_TIMEOUT * 10) {
345 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
346 return(0);
347 }
348
349 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
350
351 return((val >> ((addr % 4) * 8)) & 0xFF);
352 }
353
354 void
355 bge_vpd_read_res(sc, res, addr)
356 struct bge_softc *sc;
357 struct vpd_res *res;
358 int addr;
359 {
360 int i;
361 u_int8_t *ptr;
362
363 ptr = (u_int8_t *)res;
364 for (i = 0; i < sizeof(struct vpd_res); i++)
365 ptr[i] = bge_vpd_readbyte(sc, i + addr);
366 }
367
368 void
369 bge_vpd_read(sc)
370 struct bge_softc *sc;
371 {
372 int pos = 0, i;
373 struct vpd_res res;
374
375 if (sc->bge_vpd_prodname != NULL)
376 free(sc->bge_vpd_prodname, M_DEVBUF);
377 if (sc->bge_vpd_readonly != NULL)
378 free(sc->bge_vpd_readonly, M_DEVBUF);
379 sc->bge_vpd_prodname = NULL;
380 sc->bge_vpd_readonly = NULL;
381
382 bge_vpd_read_res(sc, &res, pos);
383
384 if (res.vr_id != VPD_RES_ID) {
385 printf("%s: bad VPD resource id: expected %x got %x\n",
386 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
387 return;
388 }
389
390 pos += sizeof(res);
391 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
392 if (sc->bge_vpd_prodname == NULL)
393 panic("bge_vpd_read");
394 for (i = 0; i < res.vr_len; i++)
395 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
396 sc->bge_vpd_prodname[i] = '\0';
397 pos += i;
398
399 bge_vpd_read_res(sc, &res, pos);
400
401 if (res.vr_id != VPD_RES_READ) {
402 printf("%s: bad VPD resource id: expected %x got %x\n",
403 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
404 return;
405 }
406
407 pos += sizeof(res);
408 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
409 if (sc->bge_vpd_readonly == NULL)
410 panic("bge_vpd_read");
411 	for (i = 0; i < res.vr_len; i++)
412 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
413 }
414 #endif
415
416 /*
417 * Read a byte of data stored in the EEPROM at address 'addr.' The
418 * BCM570x supports both the traditional bitbang interface and an
419 * auto access interface for reading the EEPROM. We use the auto
420 * access method.
421 */
422 u_int8_t
423 bge_eeprom_getbyte(sc, addr, dest)
424 struct bge_softc *sc;
425 int addr;
426 u_int8_t *dest;
427 {
428 int i;
429 u_int32_t byte = 0;
430
431 /*
432 * Enable use of auto EEPROM access so we can avoid
433 * having to use the bitbang method.
434 */
435 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
436
437 /* Reset the EEPROM, load the clock period. */
438 CSR_WRITE_4(sc, BGE_EE_ADDR,
439 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
440 DELAY(20);
441
442 /* Issue the read EEPROM command. */
443 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
444
445 /* Wait for completion */
446 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
447 DELAY(10);
448 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
449 break;
450 }
451
452 	if (i == BGE_TIMEOUT * 10) {
453 		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
454 		return(1);
455 }
456
457 /* Get result. */
458 byte = CSR_READ_4(sc, BGE_EE_DATA);
459
460 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
461
462 return(0);
463 }
464
465 /*
466 * Read a sequence of bytes from the EEPROM.
467 */
468 int
469 bge_read_eeprom(sc, dest, off, cnt)
470 struct bge_softc *sc;
471 caddr_t dest;
472 int off;
473 int cnt;
474 {
475 int err = 0, i;
476 u_int8_t byte = 0;
477
478 for (i = 0; i < cnt; i++) {
479 err = bge_eeprom_getbyte(sc, off + i, &byte);
480 if (err)
481 break;
482 *(dest + i) = byte;
483 }
484
485 return(err ? 1 : 0);
486 }
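
/*
 * Minimal usage sketch (not part of the driver flow): this is how
 * bge_attach() is expected to fetch the station address, assuming the
 * BGE_EE_MAC_OFFSET constant from if_bgereg.h locates the EEPROM copy
 * of the MAC address; the offset and helper name here are illustrative
 * assumptions, not verified against this revision.
 */
#ifdef notdef
static int
bge_example_read_eaddr(struct bge_softc *sc, u_int8_t eaddr[ETHER_ADDR_LEN])
{
	/* The station address is assumed to sit two bytes into the record. */
	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif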
487
488 int
489 bge_miibus_readreg(dev, phy, reg)
490 struct device *dev;
491 int phy, reg;
492 {
493 struct bge_softc *sc = (struct bge_softc *)dev;
494 u_int32_t val;
495 u_int32_t saved_autopoll;
496 int i;
497
498 /*
499 	 * Several chips with builtin PHYs will incorrectly answer at
500 	 * PHY addresses other than that of the builtin PHY at id 1.
501 */
502 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
503 return(0);
504
505 /* Reading with autopolling on may trigger PCI errors */
506 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
507 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
508 CSR_WRITE_4(sc, BGE_MI_MODE,
509 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
510 DELAY(40);
511 }
512
513 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
514 BGE_MIPHY(phy)|BGE_MIREG(reg));
515
516 for (i = 0; i < BGE_TIMEOUT; i++) {
517 val = CSR_READ_4(sc, BGE_MI_COMM);
518 if (!(val & BGE_MICOMM_BUSY))
519 break;
520 delay(10);
521 }
522
523 if (i == BGE_TIMEOUT) {
524 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
525 val = 0;
526 goto done;
527 }
528
529 val = CSR_READ_4(sc, BGE_MI_COMM);
530
531 done:
532 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
533 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
534 DELAY(40);
535 }
536
537 if (val & BGE_MICOMM_READFAIL)
538 return(0);
539
540 return(val & 0xFFFF);
541 }
542
543 void
544 bge_miibus_writereg(dev, phy, reg, val)
545 struct device *dev;
546 int phy, reg, val;
547 {
548 struct bge_softc *sc = (struct bge_softc *)dev;
549 u_int32_t saved_autopoll;
550 int i;
551
552 /* Touching the PHY while autopolling is on may trigger PCI errors */
553 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
554 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
555 delay(40);
556 CSR_WRITE_4(sc, BGE_MI_MODE,
557 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
558 delay(10); /* 40 usec is supposed to be adequate */
559 }
560
561 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
562 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
563
564 for (i = 0; i < BGE_TIMEOUT; i++) {
565 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
566 break;
567 delay(10);
568 }
569
570 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
571 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
572 delay(40);
573 }
574
575 if (i == BGE_TIMEOUT) {
576 		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
577 }
578 }
579
580 void
581 bge_miibus_statchg(dev)
582 struct device *dev;
583 {
584 struct bge_softc *sc = (struct bge_softc *)dev;
585 struct mii_data *mii = &sc->bge_mii;
586
587 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
588 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
589 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
590 } else {
591 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
592 }
593
594 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
595 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
596 } else {
597 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
598 }
599 }
600
601 /*
602 * Update rx threshold levels to values in a particular slot
603 * of the interrupt-mitigation table bge_rx_threshes.
604 */
605 void
606 bge_set_thresh(struct ifnet *ifp, int lvl)
607 {
608 struct bge_softc *sc = ifp->if_softc;
609 int s;
610
611 /* For now, just save the new Rx-intr thresholds and record
612 * that a threshold update is pending. Updating the hardware
613 * registers here (even at splhigh()) is observed to
614 	 * occasionally cause glitches where Rx-interrupts are not
615 * honoured for up to 10 seconds. jonathan (at) netbsd.org, 2003-04-05
616 */
617 s = splnet();
618 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
619 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
620 sc->bge_pending_rxintr_change = 1;
621 splx(s);
622
623 return;
624 }
625
626
627 /*
628 * Update Rx thresholds of all bge devices
629 */
630 void
631 bge_update_all_threshes(int lvl)
632 {
633 struct ifnet *ifp;
634 const char * const namebuf = "bge";
635 int namelen;
636
637 if (lvl < 0)
638 lvl = 0;
639 	else if (lvl >= NBGE_RX_THRESH)
640 lvl = NBGE_RX_THRESH - 1;
641
642 namelen = strlen(namebuf);
643 /*
644 * Now search all the interfaces for this name/number
645 */
646 TAILQ_FOREACH(ifp, &ifnet, if_list) {
647 		if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
648 continue;
649 /* We got a match: update if doing auto-threshold-tuning */
650 if (bge_auto_thresh)
651 bge_set_thresh(ifp->if_softc, lvl);
652 }
653 }
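
/*
 * Sketch of the sysctl hook hinted at above ("XXX patchable; should be
 * sysctl'able"): a handler would sanity-check the requested level and
 * fan it out with bge_update_all_threshes().  The function name is
 * illustrative only; it is not part of the driver.
 */
#ifdef notdef
static int
bge_example_set_thresh_lvl(int new_lvl)
{

	if (new_lvl < 0 || new_lvl >= (int)NBGE_RX_THRESH)
		return (EINVAL);
	bge_rx_thresh_lvl = new_lvl;
	bge_update_all_threshes(new_lvl);
	return (0);
}
#endif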
654
655 /*
656 * Handle events that have triggered interrupts.
657 */
658 void
659 bge_handle_events(sc)
660 struct bge_softc *sc;
661 {
662
663 return;
664 }
665
666 /*
667 * Memory management for jumbo frames.
668 */
669
670 int
671 bge_alloc_jumbo_mem(sc)
672 struct bge_softc *sc;
673 {
674 caddr_t ptr, kva;
675 bus_dma_segment_t seg;
676 int i, rseg, state, error;
677 struct bge_jpool_entry *entry;
678
679 state = error = 0;
680
681 /* Grab a big chunk o' storage. */
682 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
683 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
684 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
685 return ENOBUFS;
686 }
687
688 state = 1;
689 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
690 BUS_DMA_NOWAIT)) {
691 printf("%s: can't map DMA buffers (%d bytes)\n",
692 sc->bge_dev.dv_xname, (int)BGE_JMEM);
693 error = ENOBUFS;
694 goto out;
695 }
696
697 state = 2;
698 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
699 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
700 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
701 error = ENOBUFS;
702 goto out;
703 }
704
705 state = 3;
706 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
707 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
708 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
709 error = ENOBUFS;
710 goto out;
711 }
712
713 state = 4;
714 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
715 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
716
717 SLIST_INIT(&sc->bge_jfree_listhead);
718 SLIST_INIT(&sc->bge_jinuse_listhead);
719
720 /*
721 * Now divide it up into 9K pieces and save the addresses
722 * in an array.
723 */
724 ptr = sc->bge_cdata.bge_jumbo_buf;
725 for (i = 0; i < BGE_JSLOTS; i++) {
726 sc->bge_cdata.bge_jslots[i] = ptr;
727 ptr += BGE_JLEN;
728 entry = malloc(sizeof(struct bge_jpool_entry),
729 M_DEVBUF, M_NOWAIT);
730 if (entry == NULL) {
731 printf("%s: no memory for jumbo buffer queue!\n",
732 sc->bge_dev.dv_xname);
733 error = ENOBUFS;
734 goto out;
735 }
736 entry->slot = i;
737 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
738 entry, jpool_entries);
739 }
740 out:
741 if (error != 0) {
742 switch (state) {
743 case 4:
744 bus_dmamap_unload(sc->bge_dmatag,
745 sc->bge_cdata.bge_rx_jumbo_map);
746 case 3:
747 bus_dmamap_destroy(sc->bge_dmatag,
748 sc->bge_cdata.bge_rx_jumbo_map);
749 case 2:
750 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
751 case 1:
752 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
753 break;
754 default:
755 break;
756 }
757 }
758
759 return error;
760 }
761
762 /*
763 * Allocate a jumbo buffer.
764 */
765 void *
766 bge_jalloc(sc)
767 struct bge_softc *sc;
768 {
769 struct bge_jpool_entry *entry;
770
771 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
772
773 if (entry == NULL) {
774 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
775 return(NULL);
776 }
777
778 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
779 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
780 return(sc->bge_cdata.bge_jslots[entry->slot]);
781 }
782
783 /*
784 * Release a jumbo buffer.
785 */
786 void
787 bge_jfree(m, buf, size, arg)
788 struct mbuf *m;
789 caddr_t buf;
790 size_t size;
791 void *arg;
792 {
793 struct bge_jpool_entry *entry;
794 struct bge_softc *sc;
795 int i, s;
796
797 /* Extract the softc struct pointer. */
798 sc = (struct bge_softc *)arg;
799
800 if (sc == NULL)
801 panic("bge_jfree: can't find softc pointer!");
802
803 /* calculate the slot this buffer belongs to */
804
805 i = ((caddr_t)buf
806 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
807
808 if ((i < 0) || (i >= BGE_JSLOTS))
809 panic("bge_jfree: asked to free buffer that we don't manage!");
810
811 s = splvm();
812 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
813 if (entry == NULL)
814 panic("bge_jfree: buffer not in use!");
815 entry->slot = i;
816 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
817 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
818
819 if (__predict_true(m != NULL))
820 pool_cache_put(&mbpool_cache, m);
821 splx(s);
822 }
823
824
825 /*
826 * Initialize a standard receive ring descriptor.
827 */
828 int
829 bge_newbuf_std(sc, i, m, dmamap)
830 struct bge_softc *sc;
831 int i;
832 struct mbuf *m;
833 bus_dmamap_t dmamap;
834 {
835 struct mbuf *m_new = NULL;
836 struct bge_rx_bd *r;
837 int error;
838
839 if (dmamap == NULL) {
840 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
841 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
842 if (error != 0)
843 return error;
844 }
845
846 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
847
848 if (m == NULL) {
849 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
850 if (m_new == NULL) {
851 return(ENOBUFS);
852 }
853
854 MCLGET(m_new, M_DONTWAIT);
855 if (!(m_new->m_flags & M_EXT)) {
856 m_freem(m_new);
857 return(ENOBUFS);
858 }
859 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
860 if (!sc->bge_rx_alignment_bug)
861 m_adj(m_new, ETHER_ALIGN);
862
863 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
864 BUS_DMA_READ|BUS_DMA_NOWAIT))
865 return(ENOBUFS);
866 } else {
867 m_new = m;
868 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
869 m_new->m_data = m_new->m_ext.ext_buf;
870 if (!sc->bge_rx_alignment_bug)
871 m_adj(m_new, ETHER_ALIGN);
872 }
873
874 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
875 r = &sc->bge_rdata->bge_rx_std_ring[i];
876 bge_set_hostaddr(&r->bge_addr,
877 dmamap->dm_segs[0].ds_addr);
878 r->bge_flags = BGE_RXBDFLAG_END;
879 r->bge_len = m_new->m_len;
880 r->bge_idx = i;
881
882 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
883 offsetof(struct bge_ring_data, bge_rx_std_ring) +
884 i * sizeof (struct bge_rx_bd),
885 sizeof (struct bge_rx_bd),
886 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
887
888 return(0);
889 }
890
891 /*
892 * Initialize a jumbo receive ring descriptor. This allocates
893 * a jumbo buffer from the pool managed internally by the driver.
894 */
895 int
896 bge_newbuf_jumbo(sc, i, m)
897 struct bge_softc *sc;
898 int i;
899 struct mbuf *m;
900 {
901 struct mbuf *m_new = NULL;
902 struct bge_rx_bd *r;
903
904 if (m == NULL) {
905 		caddr_t buf = NULL;
906
907 /* Allocate the mbuf. */
908 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
909 if (m_new == NULL) {
910 return(ENOBUFS);
911 }
912
913 /* Allocate the jumbo buffer */
914 buf = bge_jalloc(sc);
915 if (buf == NULL) {
916 m_freem(m_new);
917 printf("%s: jumbo allocation failed "
918 "-- packet dropped!\n", sc->bge_dev.dv_xname);
919 return(ENOBUFS);
920 }
921
922 /* Attach the buffer to the mbuf. */
923 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
924 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
925 bge_jfree, sc);
926 } else {
927 m_new = m;
928 m_new->m_data = m_new->m_ext.ext_buf;
929 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
930 }
931
932 if (!sc->bge_rx_alignment_bug)
933 m_adj(m_new, ETHER_ALIGN);
934 /* Set up the descriptor. */
935 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
936 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
937 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
938 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
939 r->bge_len = m_new->m_len;
940 r->bge_idx = i;
941
942 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
943 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
944 i * sizeof (struct bge_rx_bd),
945 sizeof (struct bge_rx_bd),
946 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
947
948 return(0);
949 }
950
951 /*
952 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
953 * that's 1MB of memory, which is a lot. For now, we fill only the first
954 * 256 ring entries and hope that our CPU is fast enough to keep up with
955 * the NIC.
956 */
957 int
958 bge_init_rx_ring_std(sc)
959 struct bge_softc *sc;
960 {
961 int i;
962
963 if (sc->bge_flags & BGE_RXRING_VALID)
964 return 0;
965
966 for (i = 0; i < BGE_SSLOTS; i++) {
967 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
968 return(ENOBUFS);
969 }
970
971 sc->bge_std = i - 1;
972 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
973
974 sc->bge_flags |= BGE_RXRING_VALID;
975
976 return(0);
977 }
978
979 void
980 bge_free_rx_ring_std(sc)
981 struct bge_softc *sc;
982 {
983 int i;
984
985 if (!(sc->bge_flags & BGE_RXRING_VALID))
986 return;
987
988 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
989 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
990 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
991 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
992 bus_dmamap_destroy(sc->bge_dmatag,
993 sc->bge_cdata.bge_rx_std_map[i]);
994 }
995 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
996 sizeof(struct bge_rx_bd));
997 }
998
999 sc->bge_flags &= ~BGE_RXRING_VALID;
1000 }
1001
1002 int
1003 bge_init_rx_ring_jumbo(sc)
1004 struct bge_softc *sc;
1005 {
1006 int i;
1007 volatile struct bge_rcb *rcb;
1008
1009 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1010 return 0;
1011
1012 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1013 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1014 return(ENOBUFS);
1015 	}
1016
1017 sc->bge_jumbo = i - 1;
1018 sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1019
1020 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1021 rcb->bge_maxlen_flags = 0;
1022 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1023
1024 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1025
1026 return(0);
1027 }
1028
1029 void
1030 bge_free_rx_ring_jumbo(sc)
1031 struct bge_softc *sc;
1032 {
1033 int i;
1034
1035 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1036 return;
1037
1038 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1039 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1040 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1041 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1042 }
1043 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1044 sizeof(struct bge_rx_bd));
1045 }
1046
1047 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1048 }
1049
1050 void
1051 bge_free_tx_ring(sc)
1052 struct bge_softc *sc;
1053 {
1054 int i, freed;
1055 struct txdmamap_pool_entry *dma;
1056
1057 if (!(sc->bge_flags & BGE_TXRING_VALID))
1058 return;
1059
1060 freed = 0;
1061
1062 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1063 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1064 freed++;
1065 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1066 sc->bge_cdata.bge_tx_chain[i] = NULL;
1067 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1068 link);
1069 sc->txdma[i] = 0;
1070 }
1071 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1072 sizeof(struct bge_tx_bd));
1073 }
1074
1075 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1076 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1077 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1078 free(dma, M_DEVBUF);
1079 }
1080
1081 sc->bge_flags &= ~BGE_TXRING_VALID;
1082 }
1083
1084 int
1085 bge_init_tx_ring(sc)
1086 struct bge_softc *sc;
1087 {
1088 int i;
1089 bus_dmamap_t dmamap;
1090 struct txdmamap_pool_entry *dma;
1091
1092 if (sc->bge_flags & BGE_TXRING_VALID)
1093 return 0;
1094
1095 sc->bge_txcnt = 0;
1096 sc->bge_tx_saved_considx = 0;
1097 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1098 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1099 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
1100
1101 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1102 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1103 		CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1104
1105 SLIST_INIT(&sc->txdma_list);
1106 for (i = 0; i < BGE_RSLOTS; i++) {
1107 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
1108 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
1109 &dmamap))
1110 return(ENOBUFS);
1111 if (dmamap == NULL)
1112 panic("dmamap NULL in bge_init_tx_ring");
1113 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1114 if (dma == NULL) {
1115 printf("%s: can't alloc txdmamap_pool_entry\n",
1116 sc->bge_dev.dv_xname);
1117 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1118 return (ENOMEM);
1119 }
1120 dma->dmamap = dmamap;
1121 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1122 }
1123
1124 sc->bge_flags |= BGE_TXRING_VALID;
1125
1126 return(0);
1127 }
1128
1129 void
1130 bge_setmulti(sc)
1131 struct bge_softc *sc;
1132 {
1133 struct ethercom *ac = &sc->ethercom;
1134 struct ifnet *ifp = &ac->ec_if;
1135 struct ether_multi *enm;
1136 struct ether_multistep step;
1137 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1138 u_int32_t h;
1139 int i;
1140
1141 if (ifp->if_flags & IFF_PROMISC)
1142 goto allmulti;
1143
1144 /* Now program new ones. */
1145 ETHER_FIRST_MULTI(step, ac, enm);
1146 while (enm != NULL) {
1147 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1148 /*
1149 * We must listen to a range of multicast addresses.
1150 * For now, just accept all multicasts, rather than
1151 * trying to set only those filter bits needed to match
1152 * the range. (At this time, the only use of address
1153 * ranges is for IP multicast routing, for which the
1154 * range is big enough to require all bits set.)
1155 */
1156 goto allmulti;
1157 }
1158
1159 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1160
1161 /* Just want the 7 least-significant bits. */
1162 h &= 0x7f;
1163
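		/*
		 * Bits 6:5 of the hash select one of the four BGE_MAR
		 * registers; bits 4:0 select the bit within that register.
		 */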
1164 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1165 ETHER_NEXT_MULTI(step, enm);
1166 }
1167
1168 ifp->if_flags &= ~IFF_ALLMULTI;
1169 goto setit;
1170
1171 allmulti:
1172 ifp->if_flags |= IFF_ALLMULTI;
1173 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1174
1175 setit:
1176 for (i = 0; i < 4; i++)
1177 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1178 }
1179
1180 const int bge_swapbits[] = {
1181 0,
1182 BGE_MODECTL_BYTESWAP_DATA,
1183 BGE_MODECTL_WORDSWAP_DATA,
1184 BGE_MODECTL_BYTESWAP_NONFRAME,
1185 BGE_MODECTL_WORDSWAP_NONFRAME,
1186
1187 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
1188 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1189 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1190
1191 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1192 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1193
1194 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1195
1196 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1197 BGE_MODECTL_BYTESWAP_NONFRAME,
1198 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1199 BGE_MODECTL_WORDSWAP_NONFRAME,
1200 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1201 BGE_MODECTL_WORDSWAP_NONFRAME,
1202 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1203 BGE_MODECTL_WORDSWAP_NONFRAME,
1204
1205 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1206 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1207 };
1208
1209 int bge_swapindex = 0;
1210
1211 /*
1212 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1213 * self-test results.
1214 */
1215 int
1216 bge_chipinit(sc)
1217 struct bge_softc *sc;
1218 {
1219 u_int32_t cachesize;
1220 int i;
1221 u_int32_t dma_rw_ctl;
1222 struct pci_attach_args *pa = &(sc->bge_pa);
1223
1224
1225 /* Set endianness before we access any non-PCI registers. */
1226 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1227 BGE_INIT);
1228
1229 /* Set power state to D0. */
1230 bge_setpowerstate(sc, 0);
1231
1232 /*
1233 * Check the 'ROM failed' bit on the RX CPU to see if
1234 * self-tests passed.
1235 */
1236 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1237 printf("%s: RX CPU self-diagnostics failed!\n",
1238 sc->bge_dev.dv_xname);
1239 return(ENODEV);
1240 }
1241
1242 /* Clear the MAC control register */
1243 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1244
1245 /*
1246 * Clear the MAC statistics block in the NIC's
1247 * internal memory.
1248 */
1249 for (i = BGE_STATS_BLOCK;
1250 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1251 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1252
1253 for (i = BGE_STATUS_BLOCK;
1254 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1255 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1256
1257 /* Set up the PCI DMA control register. */
1258 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) &
1259 BGE_PCISTATE_PCI_BUSMODE) {
1260 /* Conventional PCI bus */
1261 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname));
1262 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1263 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1264 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
1265 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1266 dma_rw_ctl |= 0x0F;
1267 }
1268 } else {
1269 		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname));
1270 /* PCI-X bus */
1271 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1272 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1273 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1274 (0x0F);
1275 /*
1276 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1277 * for hardware bugs, which means we should also clear
1278 * the low-order MINDMA bits. In addition, the 5704
1279 * uses a different encoding of read/write watermarks.
1280 */
1281 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1282 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1283 /* should be 0x1f0000 */
1284 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1285 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1286 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1287 }
1288 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
1289 dma_rw_ctl &= 0xfffffff0;
1290 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1291 }
1292 }
1293
1294 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1295
1296 /*
1297 * Set up general mode register.
1298 */
1299 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1300 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1301 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1302
1303 /* Get cache line size. */
1304 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
1305
1306 /*
1307 * Avoid violating PCI spec on certain chip revs.
1308 */
1309 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
1310 PCIM_CMD_MWIEN) {
1311 switch(cachesize) {
1312 case 1:
1313 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1314 BGE_PCI_WRITE_BNDRY_16BYTES);
1315 break;
1316 case 2:
1317 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1318 BGE_PCI_WRITE_BNDRY_32BYTES);
1319 break;
1320 case 4:
1321 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1322 BGE_PCI_WRITE_BNDRY_64BYTES);
1323 break;
1324 case 8:
1325 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1326 BGE_PCI_WRITE_BNDRY_128BYTES);
1327 break;
1328 case 16:
1329 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1330 BGE_PCI_WRITE_BNDRY_256BYTES);
1331 break;
1332 case 32:
1333 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1334 BGE_PCI_WRITE_BNDRY_512BYTES);
1335 break;
1336 case 64:
1337 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1338 BGE_PCI_WRITE_BNDRY_1024BYTES);
1339 break;
1340 default:
1341 /* Disable PCI memory write and invalidate. */
1342 #if 0
1343 if (bootverbose)
1344 printf("%s: cache line size %d not "
1345 "supported; disabling PCI MWI\n",
1346 sc->bge_dev.dv_xname, cachesize);
1347 #endif
1348 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
1349 PCIM_CMD_MWIEN);
1350 break;
1351 }
1352 }
1353
1354 /*
1355 * Disable memory write invalidate. Apparently it is not supported
1356 * properly by these devices.
1357 */
1358 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
1359
1360
1361 #ifdef __brokenalpha__
1362 /*
1363 	 * Must ensure that we do not cross an 8K (bytes) boundary
1364 * for DMA reads. Our highest limit is 1K bytes. This is a
1365 * restriction on some ALPHA platforms with early revision
1366 * 21174 PCI chipsets, such as the AlphaPC 164lx
1367 */
1368 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1369 #endif
1370
1371 /* Set the timer prescaler (always 66MHz) */
1372 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1373
1374 return(0);
1375 }
1376
1377 int
1378 bge_blockinit(sc)
1379 struct bge_softc *sc;
1380 {
1381 volatile struct bge_rcb *rcb;
1382 bus_size_t rcb_addr;
1383 int i;
1384 struct ifnet *ifp = &sc->ethercom.ec_if;
1385 bge_hostaddr taddr;
1386
1387 /*
1388 * Initialize the memory window pointer register so that
1389 * we can access the first 32K of internal NIC RAM. This will
1390 * allow us to set up the TX send ring RCBs and the RX return
1391 * ring RCBs, plus other things which live in NIC memory.
1392 */
1393
1394 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
1395 BGE_PCI_MEMWIN_BASEADDR, 0);
1396
1397 /* Configure mbuf memory pool */
1398 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1399 if (sc->bge_extram) {
1400 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1401 BGE_EXT_SSRAM);
1402 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1403 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1404 else
1405 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1406 } else {
1407 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1408 BGE_BUFFPOOL_1);
1409 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1410 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1411 else
1412 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1413 }
1414
1415 /* Configure DMA resource pool */
1416 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1417 BGE_DMA_DESCRIPTORS);
1418 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1419 }
1420
1421 /* Configure mbuf pool watermarks */
1422 #ifdef ORIG_WPAUL_VALUES
1423 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1424 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1425 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1426 #else
1427 /* new broadcom docs strongly recommend these: */
1428 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1429 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1430 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1431 } else {
1432 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1433 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1434 }
1435 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1436 #endif
1437
1438 /* Configure DMA resource watermarks */
1439 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1440 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1441
1442 /* Enable buffer manager */
1443 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1444 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1445 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1446
1447 /* Poll for buffer manager start indication */
1448 for (i = 0; i < BGE_TIMEOUT; i++) {
1449 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1450 break;
1451 DELAY(10);
1452 }
1453
1454 if (i == BGE_TIMEOUT) {
1455 printf("%s: buffer manager failed to start\n",
1456 sc->bge_dev.dv_xname);
1457 return(ENXIO);
1458 }
1459 }
1460
1461 /* Enable flow-through queues */
1462 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1463 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1464
1465 /* Wait until queue initialization is complete */
1466 for (i = 0; i < BGE_TIMEOUT; i++) {
1467 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1468 break;
1469 DELAY(10);
1470 }
1471
1472 if (i == BGE_TIMEOUT) {
1473 printf("%s: flow-through queue init failed\n",
1474 sc->bge_dev.dv_xname);
1475 return(ENXIO);
1476 }
1477
1478 /* Initialize the standard RX ring control block */
1479 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1480 bge_set_hostaddr(&rcb->bge_hostaddr,
1481 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1482 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1483 rcb->bge_maxlen_flags =
1484 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1485 } else {
1486 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1487 }
1488 if (sc->bge_extram)
1489 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1490 else
1491 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1492 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1493 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1494 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1495 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1496
1497 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1498 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1499 } else {
1500 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1501 }
1502
1503 /*
1504 	 * Initialize the jumbo RX ring control block.
1505 * We set the 'ring disabled' bit in the flags
1506 * field until we're actually ready to start
1507 * using this ring (i.e. once we set the MTU
1508 * high enough to require it).
1509 */
1510 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1511 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1512 bge_set_hostaddr(&rcb->bge_hostaddr,
1513 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1514 rcb->bge_maxlen_flags =
1515 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1516 BGE_RCB_FLAG_RING_DISABLED);
1517 if (sc->bge_extram)
1518 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1519 else
1520 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1521
1522 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1523 rcb->bge_hostaddr.bge_addr_hi);
1524 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1525 rcb->bge_hostaddr.bge_addr_lo);
1526 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1527 rcb->bge_maxlen_flags);
1528 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1529
1530 /* Set up dummy disabled mini ring RCB */
1531 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1532 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1533 BGE_RCB_FLAG_RING_DISABLED);
1534 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1535 rcb->bge_maxlen_flags);
1536
1537 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1538 offsetof(struct bge_ring_data, bge_info),
1539 sizeof (struct bge_gib),
1540 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1541 }
1542
1543 /*
1544 	 * Set the BD ring replenish thresholds. The recommended
1545 * values are 1/8th the number of descriptors allocated to
1546 * each ring.
1547 */
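	/* With the 512-entry standard RX ring used here, that is 64 BDs. */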
1548 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1549 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1550
1551 /*
1552 * Disable all unused send rings by setting the 'ring disabled'
1553 * bit in the flags field of all the TX send ring control blocks.
1554 * These are located in NIC memory.
1555 */
1556 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1557 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1558 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1559 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1560 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1561 rcb_addr += sizeof(struct bge_rcb);
1562 }
1563
1564 /* Configure TX RCB 0 (we use only the first ring) */
1565 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1566 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1567 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1568 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1569 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1570 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1571 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1572 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1573 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1574 }
1575
1576 /* Disable all unused RX return rings */
1577 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1578 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1579 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1580 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1581 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1582 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1583 BGE_RCB_FLAG_RING_DISABLED));
1584 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1585 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1586 (i * (sizeof(u_int64_t))), 0);
1587 rcb_addr += sizeof(struct bge_rcb);
1588 }
1589
1590 /* Initialize RX ring indexes */
1591 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1592 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1593 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1594
1595 /*
1596 * Set up RX return ring 0
1597 * Note that the NIC address for RX return rings is 0x00000000.
1598 * The return rings live entirely within the host, so the
1599 * nicaddr field in the RCB isn't used.
1600 */
1601 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1602 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1603 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1604 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1605 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1606 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1607 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1608
1609 /* Set random backoff seed for TX */
1610 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1611 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
1612 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
1613 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
1614 BGE_TX_BACKOFF_SEED_MASK);
1615
1616 /* Set inter-packet gap */
1617 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1618
1619 /*
1620 * Specify which ring to use for packets that don't match
1621 * any RX rules.
1622 */
1623 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1624
1625 /*
1626 * Configure number of RX lists. One interrupt distribution
1627 * list, sixteen active lists, one bad frames class.
1628 */
1629 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1630
1631 	/* Initialize RX list placement stats mask. */
1632 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1633 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1634
1635 /* Disable host coalescing until we get it set up */
1636 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1637
1638 /* Poll to make sure it's shut down. */
1639 for (i = 0; i < BGE_TIMEOUT; i++) {
1640 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1641 break;
1642 DELAY(10);
1643 }
1644
1645 if (i == BGE_TIMEOUT) {
1646 printf("%s: host coalescing engine failed to idle\n",
1647 sc->bge_dev.dv_xname);
1648 return(ENXIO);
1649 }
1650
1651 /* Set up host coalescing defaults */
1652 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1653 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1654 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1655 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1656 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1657 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1658 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1659 }
1660 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1661 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1662
1663 /* Set up address of statistics block */
1664 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1665 bge_set_hostaddr(&taddr,
1666 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1667 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1668 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1669 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
1670 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1671 }
1672
1673 /* Set up address of status block */
1674 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1675 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1676 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1677 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1678 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1679 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1680
1681 /* Turn on host coalescing state machine */
1682 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1683
1684 /* Turn on RX BD completion state machine and enable attentions */
1685 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1686 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1687
1688 /* Turn on RX list placement state machine */
1689 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1690
1691 /* Turn on RX list selector state machine. */
1692 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1693 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1694 }
1695
1696 /* Turn on DMA, clear stats */
1697 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1698 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1699 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1700 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1701 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1702
1703 /* Set misc. local control, enable interrupts on attentions */
1704 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
1705
1706 #ifdef notdef
1707 /* Assert GPIO pins for PHY reset */
1708 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1709 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1710 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1711 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1712 #endif
1713
1714 #if defined(not_quite_yet)
1715 	/* The Linux driver enables GPIO pin #1 on 5700s */
1716 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
1717 sc->bge_local_ctrl_reg |=
1718 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
1719 }
1720 #endif
1721 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1722
1723 /* Turn on DMA completion state machine */
1724 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1725 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1726 }
1727
1728 /* Turn on write DMA state machine */
1729 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1730 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1731
1732 /* Turn on read DMA state machine */
1733 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1734 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1735
1736 /* Turn on RX data completion state machine */
1737 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1738
1739 /* Turn on RX BD initiator state machine */
1740 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1741
1742 /* Turn on RX data and RX BD initiator state machine */
1743 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1744
1745 /* Turn on Mbuf cluster free state machine */
1746 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1747 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1748 }
1749
1750 /* Turn on send BD completion state machine */
1751 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1752
1753 /* Turn on send data completion state machine */
1754 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1755
1756 /* Turn on send data initiator state machine */
1757 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1758
1759 /* Turn on send BD initiator state machine */
1760 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1761
1762 /* Turn on send BD selector state machine */
1763 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1764
1765 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1766 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1767 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1768
1769 /* ack/clear link change events */
1770 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1771 BGE_MACSTAT_CFG_CHANGED);
1772 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1773
1774 /* Enable PHY auto polling (for MII/GMII only) */
1775 if (sc->bge_tbi) {
1776 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1777 } else {
1778 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1779 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1780 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1781 BGE_EVTENB_MI_INTERRUPT);
1782 }
1783
1784 /* Enable link state change attentions. */
1785 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1786
1787 return(0);
1788 }
1789
1790 static const struct bge_revision {
1791 uint32_t br_chipid;
1792 uint32_t br_quirks;
1793 const char *br_name;
1794 } bge_revisions[] = {
1795 { BGE_CHIPID_BCM5700_A0,
1796 BGE_QUIRK_LINK_STATE_BROKEN,
1797 "BCM5700 A0" },
1798
1799 { BGE_CHIPID_BCM5700_A1,
1800 BGE_QUIRK_LINK_STATE_BROKEN,
1801 "BCM5700 A1" },
1802
1803 { BGE_CHIPID_BCM5700_B0,
1804 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
1805 "BCM5700 B0" },
1806
1807 { BGE_CHIPID_BCM5700_B1,
1808 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1809 "BCM5700 B1" },
1810
1811 { BGE_CHIPID_BCM5700_B2,
1812 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1813 "BCM5700 B2" },
1814
1815 /* This is treated like a BCM5700 Bx */
1816 { BGE_CHIPID_BCM5700_ALTIMA,
1817 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1818 "BCM5700 Altima" },
1819
1820 { BGE_CHIPID_BCM5700_C0,
1821 0,
1822 "BCM5700 C0" },
1823
1824 { BGE_CHIPID_BCM5701_A0,
1825 0, /*XXX really, just not known */
1826 "BCM5701 A0" },
1827
1828 { BGE_CHIPID_BCM5701_B0,
1829 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1830 "BCM5701 B0" },
1831
1832 { BGE_CHIPID_BCM5701_B2,
1833 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1834 "BCM5701 B2" },
1835
1836 { BGE_CHIPID_BCM5701_B5,
1837 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1838 "BCM5701 B5" },
1839
1840 { BGE_CHIPID_BCM5703_A0,
1841 0,
1842 "BCM5703 A0" },
1843
1844 { BGE_CHIPID_BCM5703_A1,
1845 0,
1846 "BCM5703 A1" },
1847
1848 { BGE_CHIPID_BCM5703_A2,
1849 BGE_QUIRK_ONLY_PHY_1,
1850 "BCM5703 A2" },
1851
1852 { BGE_CHIPID_BCM5703_A3,
1853 BGE_QUIRK_ONLY_PHY_1,
1854 "BCM5703 A3" },
1855
1856 { BGE_CHIPID_BCM5704_A0,
1857 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1858 "BCM5704 A0" },
1859
1860 { BGE_CHIPID_BCM5704_A1,
1861 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1862 "BCM5704 A1" },
1863
1864 { BGE_CHIPID_BCM5704_A2,
1865 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1866 "BCM5704 A2" },
1867
1868 { BGE_CHIPID_BCM5704_A3,
1869 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
1870 "BCM5704 A3" },
1871
1872 { BGE_CHIPID_BCM5705_A0,
1873 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1874 "BCM5705 A0" },
1875
1876 { BGE_CHIPID_BCM5705_A1,
1877 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1878 "BCM5705 A1" },
1879
1880 { BGE_CHIPID_BCM5705_A2,
1881 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1882 "BCM5705 A2" },
1883
1884 { BGE_CHIPID_BCM5705_A3,
1885 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1886 "BCM5705 A3" },
1887
1888 { 0, 0, NULL }
1889 };
1890
1891 /*
1892 * Some defaults for major revisions, so that newer steppings
1893 * that we don't know about have a shot at working.
1894 */
1895 static const struct bge_revision bge_majorrevs[] = {
1896 { BGE_ASICREV_BCM5700,
1897 BGE_QUIRK_LINK_STATE_BROKEN,
1898 "unknown BCM5700" },
1899
1900 { BGE_ASICREV_BCM5701,
1901 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1902 "unknown BCM5701" },
1903
1904 { BGE_ASICREV_BCM5703,
1905 0,
1906 "unknown BCM5703" },
1907
1908 { BGE_ASICREV_BCM5704,
1909 BGE_QUIRK_ONLY_PHY_1,
1910 "unknown BCM5704" },
1911
1912 { BGE_ASICREV_BCM5705,
1913 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1914 "unknown BCM5705" },
1915
1916 { 0,
1917 0,
1918 NULL }
1919 };
1920
1921
1922 static const struct bge_revision *
1923 bge_lookup_rev(uint32_t chipid)
1924 {
1925 const struct bge_revision *br;
1926
1927 for (br = bge_revisions; br->br_name != NULL; br++) {
1928 if (br->br_chipid == chipid)
1929 return (br);
1930 }
1931
1932 for (br = bge_majorrevs; br->br_name != NULL; br++) {
1933 if (br->br_chipid == BGE_ASICREV(chipid))
1934 return (br);
1935 }
1936
1937 return (NULL);
1938 }
1939
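/*
 * Table of PCI vendor/product IDs handled by this driver, used by
 * bge_lookup() to match devices at probe/attach time.
 */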
1940 static const struct bge_product {
1941 pci_vendor_id_t bp_vendor;
1942 pci_product_id_t bp_product;
1943 const char *bp_name;
1944 } bge_products[] = {
1945 /*
1946 * The BCM5700 documentation seems to indicate that the hardware
1947 * still has the Alteon vendor ID burned into it, though it
1948 * should always be overridden by the value in the EEPROM. We'll
1949 * check for it anyway.
1950 */
1951 { PCI_VENDOR_ALTEON,
1952 PCI_PRODUCT_ALTEON_BCM5700,
1953 "Broadcom BCM5700 Gigabit Ethernet",
1954 },
1955 { PCI_VENDOR_ALTEON,
1956 PCI_PRODUCT_ALTEON_BCM5701,
1957 "Broadcom BCM5701 Gigabit Ethernet",
1958 },
1959
1960 { PCI_VENDOR_ALTIMA,
1961 PCI_PRODUCT_ALTIMA_AC1000,
1962 "Altima AC1000 Gigabit Ethernet",
1963 },
1964 { PCI_VENDOR_ALTIMA,
1965 PCI_PRODUCT_ALTIMA_AC1001,
1966 "Altima AC1001 Gigabit Ethernet",
1967 },
1968 { PCI_VENDOR_ALTIMA,
1969 PCI_PRODUCT_ALTIMA_AC9100,
1970 "Altima AC9100 Gigabit Ethernet",
1971 },
1972
1973 { PCI_VENDOR_BROADCOM,
1974 PCI_PRODUCT_BROADCOM_BCM5700,
1975 "Broadcom BCM5700 Gigabit Ethernet",
1976 },
1977 { PCI_VENDOR_BROADCOM,
1978 PCI_PRODUCT_BROADCOM_BCM5701,
1979 "Broadcom BCM5701 Gigabit Ethernet",
1980 },
1981 { PCI_VENDOR_BROADCOM,
1982 PCI_PRODUCT_BROADCOM_BCM5702,
1983 "Broadcom BCM5702 Gigabit Ethernet",
1984 },
1985 { PCI_VENDOR_BROADCOM,
1986 PCI_PRODUCT_BROADCOM_BCM5702X,
1987 "Broadcom BCM5702X Gigabit Ethernet" },
1988
1989 { PCI_VENDOR_BROADCOM,
1990 PCI_PRODUCT_BROADCOM_BCM5703,
1991 "Broadcom BCM5703 Gigabit Ethernet",
1992 },
1993 { PCI_VENDOR_BROADCOM,
1994 PCI_PRODUCT_BROADCOM_BCM5703X,
1995 "Broadcom BCM5703X Gigabit Ethernet",
1996 },
1997 { PCI_VENDOR_BROADCOM,
1998 PCI_PRODUCT_BROADCOM_BCM5703A3,
1999 "Broadcom BCM5703A3 Gigabit Ethernet",
2000 },
2001
2002 { PCI_VENDOR_BROADCOM,
2003 PCI_PRODUCT_BROADCOM_BCM5704C,
2004 "Broadcom BCM5704C Dual Gigabit Ethernet",
2005 },
2006 { PCI_VENDOR_BROADCOM,
2007 PCI_PRODUCT_BROADCOM_BCM5704S,
2008 "Broadcom BCM5704S Dual Gigabit Ethernet",
2009 },
2010
2011 { PCI_VENDOR_BROADCOM,
2012 PCI_PRODUCT_BROADCOM_BCM5705,
2013 "Broadcom BCM5705 Gigabit Ethernet",
2014 },
2015 { PCI_VENDOR_BROADCOM,
2016 PCI_PRODUCT_BROADCOM_BCM5705_ALT,
2017 "Broadcom BCM5705 Gigabit Ethernet",
2018 },
2019 { PCI_VENDOR_BROADCOM,
2020 PCI_PRODUCT_BROADCOM_BCM5705M,
2021 "Broadcom BCM5705M Gigabit Ethernet",
2022 },
2023
2024 { PCI_VENDOR_BROADCOM,
2025 PCI_PRODUCT_BROADCOM_BCM5901,
2026 "Broadcom BCM5901 Fast Ethernet",
2027 },
2028 { PCI_VENDOR_BROADCOM,
2029 PCI_PRODUCT_BROADCOM_BCM5901A2,
2030 "Broadcom BCM5901A2 Fast Ethernet",
2031 },
2032
2033 { PCI_VENDOR_BROADCOM,
2034 PCI_PRODUCT_BROADCOM_BCM5782,
2035 "Broadcom BCM5782 Gigabit Ethernet",
2036 },
2037
2038 { PCI_VENDOR_SCHNEIDERKOCH,
2039 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2040 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2041 },
2042
2043 { PCI_VENDOR_3COM,
2044 PCI_PRODUCT_3COM_3C996,
2045 "3Com 3c996 Gigabit Ethernet",
2046 },
2047
2048 { 0,
2049 0,
2050 NULL },
2051 };
2052
2053 static const struct bge_product *
2054 bge_lookup(const struct pci_attach_args *pa)
2055 {
2056 const struct bge_product *bp;
2057
2058 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2059 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2060 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2061 return (bp);
2062 }
2063
2064 return (NULL);
2065 }
2066
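/*
 * Set the chip power state.  The actual register pokes are still
 * under #ifdef NOTYET, so for now this always returns EOPNOTSUPP.
 */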
2067 int
2068 bge_setpowerstate(sc, powerlevel)
2069 struct bge_softc *sc;
2070 int powerlevel;
2071 {
2072 #ifdef NOTYET
2073 u_int32_t pm_ctl = 0;
2074
2075 /* XXX FIXME: make sure indirect accesses enabled? */
2076 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2077 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2078 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2079
2080 /* clear the PME_assert bit and power state bits, enable PME */
2081 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2082 pm_ctl &= ~PCIM_PSTAT_DMASK;
2083 pm_ctl |= (1 << 8);
2084
2085 if (powerlevel == 0) {
2086 pm_ctl |= PCIM_PSTAT_D0;
2087 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2088 pm_ctl, 2);
2089 DELAY(10000);
2090 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2091 DELAY(10000);
2092
2093 #ifdef NOTYET
2094 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2095 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2096 #endif
2097 DELAY(40); DELAY(40); DELAY(40);
2098 DELAY(10000); /* above not quite adequate on 5700 */
2099 return 0;
2100 }
2101
2102
2103 /*
2104 * Entering ACPI power states D1-D3 is achieved by wiggling
2105 	 * GMII GPIO pins. Example code assumes all hardware vendors
2106 	 * followed Broadcom's sample PCB layout. Until we verify that
2107 * for all supported OEM cards, states D1-D3 are unsupported.
2108 */
2109 printf("%s: power state %d unimplemented; check GPIO pins\n",
2110 sc->bge_dev.dv_xname, powerlevel);
2111 #endif
2112 return EOPNOTSUPP;
2113 }
2114
2115
2116 /*
2117 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2118  * against our list and accept the device if we find a match. Note
2119 * that since the Broadcom controller contains VPD support, we
2120 * can get the device name string from the controller itself instead
2121 * of the compiled-in string. This is a little slow, but it guarantees
2122 * we'll always announce the right product name.
2123 */
2124 int
2125 bge_probe(parent, match, aux)
2126 struct device *parent;
2127 struct cfdata *match;
2128 void *aux;
2129 {
2130 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2131
2132 if (bge_lookup(pa) != NULL)
2133 return (1);
2134
2135 return (0);
2136 }
2137
2138 void
2139 bge_attach(parent, self, aux)
2140 struct device *parent, *self;
2141 void *aux;
2142 {
2143 struct bge_softc *sc = (struct bge_softc *)self;
2144 struct pci_attach_args *pa = aux;
2145 const struct bge_product *bp;
2146 const struct bge_revision *br;
2147 pci_chipset_tag_t pc = pa->pa_pc;
2148 pci_intr_handle_t ih;
2149 const char *intrstr = NULL;
2150 bus_dma_segment_t seg;
2151 int rseg;
2152 u_int32_t hwcfg = 0;
2153 u_int32_t mac_addr = 0;
2154 u_int32_t command;
2155 struct ifnet *ifp;
2156 caddr_t kva;
2157 u_char eaddr[ETHER_ADDR_LEN];
2158 pcireg_t memtype;
2159 bus_addr_t memaddr;
2160 bus_size_t memsize;
2161 u_int32_t pm_ctl;
2162
2163 bp = bge_lookup(pa);
2164 KASSERT(bp != NULL);
2165
2166 sc->bge_pa = *pa;
2167
2168 aprint_naive(": Ethernet controller\n");
2169 aprint_normal(": %s\n", bp->bp_name);
2170
2171 /*
2172 * Map control/status registers.
2173 */
2174 DPRINTFN(5, ("Map control/status regs\n"));
2175 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2176 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2177 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
2178 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
2179
2180 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2181 aprint_error("%s: failed to enable memory mapping!\n",
2182 sc->bge_dev.dv_xname);
2183 return;
2184 }
2185
2186 DPRINTFN(5, ("pci_mem_find\n"));
2187 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
2188 switch (memtype) {
2189 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2190 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2191 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2192 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2193 &memaddr, &memsize) == 0)
2194 break;
2195 default:
2196 aprint_error("%s: can't find mem space\n",
2197 sc->bge_dev.dv_xname);
2198 return;
2199 }
2200
2201 DPRINTFN(5, ("pci_intr_map\n"));
2202 if (pci_intr_map(pa, &ih)) {
2203 aprint_error("%s: couldn't map interrupt\n",
2204 sc->bge_dev.dv_xname);
2205 return;
2206 }
2207
2208 DPRINTFN(5, ("pci_intr_string\n"));
2209 intrstr = pci_intr_string(pc, ih);
2210
2211 DPRINTFN(5, ("pci_intr_establish\n"));
2212 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2213
2214 if (sc->bge_intrhand == NULL) {
2215 aprint_error("%s: couldn't establish interrupt",
2216 sc->bge_dev.dv_xname);
2217 if (intrstr != NULL)
2218 aprint_normal(" at %s", intrstr);
2219 aprint_normal("\n");
2220 return;
2221 }
2222 aprint_normal("%s: interrupting at %s\n",
2223 sc->bge_dev.dv_xname, intrstr);
2224
2225 /*
2226 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2227 * can clobber the chip's PCI config-space power control registers,
2228 * leaving the card in D3 powersave state.
2229 * We do not have memory-mapped registers in this state,
2230 * so force device into D0 state before starting initialization.
2231 */
2232 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2233 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2234 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2235 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2236 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2237
2238 /* Try to reset the chip. */
2239 DPRINTFN(5, ("bge_reset\n"));
2240 bge_reset(sc);
2241
2242 if (bge_chipinit(sc)) {
2243 aprint_error("%s: chip initialization failed\n",
2244 sc->bge_dev.dv_xname);
2245 bge_release_resources(sc);
2246 return;
2247 }
2248
2249 /*
2250 * Get station address from the EEPROM.
2251 */
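	/*
	 * The firmware may already have placed the station address in
	 * NIC memory at 0x0c14/0x0c18 (flagged by the 0x484b signature
	 * in the upper 16 bits); if not, fall back to the EEPROM.
	 */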
2252 mac_addr = bge_readmem_ind(sc, 0x0c14);
2253 if ((mac_addr >> 16) == 0x484b) {
2254 eaddr[0] = (u_char)(mac_addr >> 8);
2255 eaddr[1] = (u_char)(mac_addr >> 0);
2256 mac_addr = bge_readmem_ind(sc, 0x0c18);
2257 eaddr[2] = (u_char)(mac_addr >> 24);
2258 eaddr[3] = (u_char)(mac_addr >> 16);
2259 eaddr[4] = (u_char)(mac_addr >> 8);
2260 eaddr[5] = (u_char)(mac_addr >> 0);
2261 } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
2262 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2263 aprint_error("%s: failed to read station address\n",
2264 sc->bge_dev.dv_xname);
2265 bge_release_resources(sc);
2266 return;
2267 }
2268
2269 /*
2270 * Save ASIC rev. Look up any quirks associated with this
2271 * ASIC.
2272 */
2273 sc->bge_chipid =
2274 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
2275 BGE_PCIMISCCTL_ASICREV;
2276 br = bge_lookup_rev(sc->bge_chipid);
2277
2278 aprint_normal("%s: ", sc->bge_dev.dv_xname);
2279
2280 if (br == NULL) {
2281 aprint_normal("unknown ASIC (0x%04x)", sc->bge_chipid >> 16);
2282 sc->bge_quirks = 0;
2283 } else {
2284 aprint_normal("ASIC %s (0x%04x)",
2285 br->br_name, sc->bge_chipid >> 16);
2286 sc->bge_quirks |= br->br_quirks;
2287 }
2288 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2289
2290 /* Allocate the general information block and ring buffers. */
2291 if (pci_dma64_available(pa))
2292 sc->bge_dmatag = pa->pa_dmat64;
2293 else
2294 sc->bge_dmatag = pa->pa_dmat;
2295 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2296 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2297 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2298 aprint_error("%s: can't alloc rx buffers\n",
2299 sc->bge_dev.dv_xname);
2300 return;
2301 }
2302 DPRINTFN(5, ("bus_dmamem_map\n"));
2303 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2304 sizeof(struct bge_ring_data), &kva,
2305 BUS_DMA_NOWAIT)) {
2306 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
2307 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
2308 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2309 return;
2310 }
2311 	DPRINTFN(5, ("bus_dmamap_create\n"));
2312 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2313 sizeof(struct bge_ring_data), 0,
2314 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2315 aprint_error("%s: can't create DMA map\n",
2316 sc->bge_dev.dv_xname);
2317 bus_dmamem_unmap(sc->bge_dmatag, kva,
2318 sizeof(struct bge_ring_data));
2319 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2320 return;
2321 }
2322 	DPRINTFN(5, ("bus_dmamap_load\n"));
2323 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2324 sizeof(struct bge_ring_data), NULL,
2325 BUS_DMA_NOWAIT)) {
2326 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2327 bus_dmamem_unmap(sc->bge_dmatag, kva,
2328 sizeof(struct bge_ring_data));
2329 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2330 return;
2331 }
2332
2333 	DPRINTFN(5, ("memset\n"));
2334 sc->bge_rdata = (struct bge_ring_data *)kva;
2335
2336 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2337
2338 /* Try to allocate memory for jumbo buffers. */
2339 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2340 if (bge_alloc_jumbo_mem(sc)) {
2341 aprint_error("%s: jumbo buffer allocation failed\n",
2342 sc->bge_dev.dv_xname);
2343 } else
2344 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2345 }
2346
2347 /* Set default tuneable values. */
2348 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2349 sc->bge_rx_coal_ticks = 150;
2350 sc->bge_rx_max_coal_bds = 64;
2351 #ifdef ORIG_WPAUL_VALUES
2352 sc->bge_tx_coal_ticks = 150;
2353 sc->bge_tx_max_coal_bds = 128;
2354 #else
2355 sc->bge_tx_coal_ticks = 300;
2356 sc->bge_tx_max_coal_bds = 400;
2357 #endif
2358
2359 /* Set up ifnet structure */
2360 ifp = &sc->ethercom.ec_if;
2361 ifp->if_softc = sc;
2362 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2363 ifp->if_ioctl = bge_ioctl;
2364 ifp->if_start = bge_start;
2365 ifp->if_init = bge_init;
2366 ifp->if_watchdog = bge_watchdog;
2367 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2368 IFQ_SET_READY(&ifp->if_snd);
2369 	DPRINTFN(5, ("strcpy\n"));
2370 strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
2371
2372 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2373 sc->ethercom.ec_if.if_capabilities |=
2374 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2375 sc->ethercom.ec_capabilities |=
2376 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2377
2378 /*
2379 * Do MII setup.
2380 */
2381 DPRINTFN(5, ("mii setup\n"));
2382 sc->bge_mii.mii_ifp = ifp;
2383 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2384 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2385 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2386
2387 /*
2388 * Figure out what sort of media we have by checking the
2389 * hardware config word in the first 32k of NIC internal memory,
2390 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2391 * cards, this value appears to be unset. If that's the
2392 * case, we have to rely on identifying the NIC by its PCI
2393 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2394 */
2395 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2396 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2397 } else {
2398 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2399 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2400 hwcfg = be32toh(hwcfg);
2401 }
2402 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2403 sc->bge_tbi = 1;
2404
2405 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2406 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
2407 SK_SUBSYSID_9D41)
2408 sc->bge_tbi = 1;
2409
2410 if (sc->bge_tbi) {
2411 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2412 bge_ifmedia_sts);
2413 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2414 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2415 0, NULL);
2416 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2417 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2418 } else {
2419 /*
2420 * Do transceiver setup.
2421 */
2422 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2423 bge_ifmedia_sts);
2424 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2425 MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);
2426
2427 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
2428 printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2429 ifmedia_add(&sc->bge_mii.mii_media,
2430 IFM_ETHER|IFM_MANUAL, 0, NULL);
2431 ifmedia_set(&sc->bge_mii.mii_media,
2432 IFM_ETHER|IFM_MANUAL);
2433 } else
2434 ifmedia_set(&sc->bge_mii.mii_media,
2435 IFM_ETHER|IFM_AUTO);
2436 }
2437
2438 /*
2439 * When using the BCM5701 in PCI-X mode, data corruption has
2440 * been observed in the first few bytes of some received packets.
2441 * Aligning the packet buffer in memory eliminates the corruption.
2442 * Unfortunately, this misaligns the packet payloads. On platforms
2443 * which do not support unaligned accesses, we will realign the
2444 * payloads by copying the received packets.
2445 */
2446 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2447 /* If in PCI-X mode, work around the alignment bug. */
2448 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2449 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2450 BGE_PCISTATE_PCI_BUSSPEED)
2451 sc->bge_rx_alignment_bug = 1;
2452 }
2453
2454 /*
2455 * Call MI attach routine.
2456 */
2457 DPRINTFN(5, ("if_attach\n"));
2458 if_attach(ifp);
2459 DPRINTFN(5, ("ether_ifattach\n"));
2460 ether_ifattach(ifp, eaddr);
2461 DPRINTFN(5, ("callout_init\n"));
2462 callout_init(&sc->bge_timeout);
2463 }
2464
2465 void
2466 bge_release_resources(sc)
2467 struct bge_softc *sc;
2468 {
2469 if (sc->bge_vpd_prodname != NULL)
2470 free(sc->bge_vpd_prodname, M_DEVBUF);
2471
2472 if (sc->bge_vpd_readonly != NULL)
2473 free(sc->bge_vpd_readonly, M_DEVBUF);
2474 }
2475
2476 void
2477 bge_reset(sc)
2478 struct bge_softc *sc;
2479 {
2480 struct pci_attach_args *pa = &sc->bge_pa;
2481 u_int32_t cachesize, command, pcistate, new_pcistate;
2482 int i, val = 0;
2483
2484 /* Save some important PCI state. */
2485 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2486 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2487 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2488
2489 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2490 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2491 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2492
2493 /* Issue global reset */
2494 bge_writereg_ind(sc, BGE_MISC_CFG,
2495 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
2496
2497 DELAY(1000);
2498
2499 /* Reset some of the PCI state that got zapped by reset */
2500 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2501 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2502 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2503 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2504 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2505 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2506
2507 /* Enable memory arbiter. */
2508 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2509 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2510 }
2511
2512 /*
2513 * Prevent PXE restart: write a magic number to the
2514 * general communications memory at 0xB50.
2515 */
2516 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2517
2518 /*
2519 * Poll the value location we just wrote until
2520 * we see the 1's complement of the magic number.
2521 * This indicates that the firmware initialization
2522 * is complete.
2523 */
2524 for (i = 0; i < 750; i++) {
2525 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2526 if (val == ~BGE_MAGIC_NUMBER)
2527 break;
2528 DELAY(1000);
2529 }
2530
2531 if (i == 750) {
2532 printf("%s: firmware handshake timed out, val = %x\n",
2533 sc->bge_dev.dv_xname, val);
2534 return;
2535 }
2536
2537 /*
2538 * XXX Wait for the value of the PCISTATE register to
2539 * return to its original pre-reset state. This is a
2540 * fairly good indicator of reset completion. If we don't
2541 * wait for the reset to fully complete, trying to read
2542 * from the device's non-PCI registers may yield garbage
2543 * results.
2544 */
2545 for (i = 0; i < BGE_TIMEOUT; i++) {
2546 new_pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag,
2547 BGE_PCI_PCISTATE);
2548 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
2549 (pcistate & ~BGE_PCISTATE_RESERVED))
2550 break;
2551 DELAY(10);
2552 }
2553 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
2554 (pcistate & ~BGE_PCISTATE_RESERVED)) {
2555 printf("%s: pcistate failed to revert\n",
2556 sc->bge_dev.dv_xname);
2557 }
2558
2559 /* Enable memory arbiter. */
2560 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2561 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2562 }
2563
2564 /* Fix up byte swapping */
2565 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2566
2567 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2568
2569 DELAY(10000);
2570 }
2571
2572 /*
2573 * Frame reception handling. This is called if there's a frame
2574 * on the receive return list.
2575 *
2576 * Note: we have to be able to handle two possibilities here:
2577  * 1) the frame is from the jumbo receive ring
2578 * 2) the frame is from the standard receive ring
2579 */
2580
2581 void
2582 bge_rxeof(sc)
2583 struct bge_softc *sc;
2584 {
2585 struct ifnet *ifp;
2586 int stdcnt = 0, jumbocnt = 0;
2587 int have_tag = 0;
2588 u_int16_t vlan_tag = 0;
2589 bus_dmamap_t dmamap;
2590 bus_addr_t offset, toff;
2591 bus_size_t tlen;
2592 int tosync;
2593
2594 ifp = &sc->ethercom.ec_if;
2595
2596 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2597 offsetof(struct bge_ring_data, bge_status_block),
2598 sizeof (struct bge_status_block),
2599 BUS_DMASYNC_POSTREAD);
2600
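	/*
	 * Sync the new RX return ring descriptors before inspecting
	 * them.  A negative count means the producer index has wrapped,
	 * so the tail of the ring is synced separately first.
	 */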
2601 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2602 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2603 sc->bge_rx_saved_considx;
2604
2605 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2606
2607 if (tosync < 0) {
2608 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2609 sizeof (struct bge_rx_bd);
2610 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2611 toff, tlen, BUS_DMASYNC_POSTREAD);
2612 tosync = -tosync;
2613 }
2614
2615 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2616 offset, tosync * sizeof (struct bge_rx_bd),
2617 BUS_DMASYNC_POSTREAD);
2618
2619 while(sc->bge_rx_saved_considx !=
2620 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2621 struct bge_rx_bd *cur_rx;
2622 u_int32_t rxidx;
2623 struct mbuf *m = NULL;
2624
2625 cur_rx = &sc->bge_rdata->
2626 bge_rx_return_ring[sc->bge_rx_saved_considx];
2627
2628 rxidx = cur_rx->bge_idx;
2629 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2630
2631 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2632 have_tag = 1;
2633 vlan_tag = cur_rx->bge_vlan_tag;
2634 }
2635
2636 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2637 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2638 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2639 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2640 jumbocnt++;
2641 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2642 ifp->if_ierrors++;
2643 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2644 continue;
2645 }
2646 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2647 NULL)== ENOBUFS) {
2648 ifp->if_ierrors++;
2649 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2650 continue;
2651 }
2652 } else {
2653 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2654 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2655 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2656 stdcnt++;
2657 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2658 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2659 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2660 ifp->if_ierrors++;
2661 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2662 continue;
2663 }
2664 if (bge_newbuf_std(sc, sc->bge_std,
2665 NULL, dmamap) == ENOBUFS) {
2666 ifp->if_ierrors++;
2667 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2668 continue;
2669 }
2670 }
2671
2672 ifp->if_ipackets++;
2673 #ifndef __NO_STRICT_ALIGNMENT
2674 /*
2675 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
2676 * the Rx buffer has the layer-2 header unaligned.
2677 * If our CPU requires alignment, re-align by copying.
2678 */
2679 if (sc->bge_rx_alignment_bug) {
2680 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
2681 cur_rx->bge_len);
2682 m->m_data += ETHER_ALIGN;
2683 }
2684 #endif
2685
2686 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
2687 m->m_pkthdr.rcvif = ifp;
2688
2689 #if NBPFILTER > 0
2690 /*
2691 * Handle BPF listeners. Let the BPF user see the packet.
2692 */
2693 if (ifp->if_bpf)
2694 bpf_mtap(ifp->if_bpf, m);
2695 #endif
2696
2697 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
2698
2699 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2700 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2701 /*
2702 * Rx transport checksum-offload may also
2703 * have bugs with packets which, when transmitted,
2704 * were `runts' requiring padding.
2705 */
2706 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
2707 		    (/* (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 || */
2708 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
2709 m->m_pkthdr.csum_data =
2710 cur_rx->bge_tcp_udp_csum;
2711 m->m_pkthdr.csum_flags |=
2712 (M_CSUM_TCPv4|M_CSUM_UDPv4|
2713 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
2714 }
2715
2716 /*
2717 * If we received a packet with a vlan tag, pass it
2718 * to vlan_input() instead of ether_input().
2719 */
2720 if (have_tag) {
2721 struct m_tag *mtag;
2722
2723 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2724 M_NOWAIT);
2725 if (mtag != NULL) {
2726 *(u_int *)(mtag + 1) = vlan_tag;
2727 m_tag_prepend(m, mtag);
2728 have_tag = vlan_tag = 0;
2729 } else {
2730 printf("%s: no mbuf for tag\n", ifp->if_xname);
2731 m_freem(m);
2732 have_tag = vlan_tag = 0;
2733 continue;
2734 }
2735 }
2736 (*ifp->if_input)(ifp, m);
2737 }
2738
2739 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2740 if (stdcnt)
2741 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2742 if (jumbocnt)
2743 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2744 }
2745
2746 void
2747 bge_txeof(sc)
2748 struct bge_softc *sc;
2749 {
2750 struct bge_tx_bd *cur_tx = NULL;
2751 struct ifnet *ifp;
2752 struct txdmamap_pool_entry *dma;
2753 bus_addr_t offset, toff;
2754 bus_size_t tlen;
2755 int tosync;
2756 struct mbuf *m;
2757
2758 ifp = &sc->ethercom.ec_if;
2759
2760 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2761 offsetof(struct bge_ring_data, bge_status_block),
2762 sizeof (struct bge_status_block),
2763 BUS_DMASYNC_POSTREAD);
2764
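	/*
	 * Sync the completed TX descriptors before reclaiming their
	 * mbufs, handling ring wrap the same way as bge_rxeof().
	 */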
2765 offset = offsetof(struct bge_ring_data, bge_tx_ring);
2766 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2767 sc->bge_tx_saved_considx;
2768
2769 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2770
2771 if (tosync < 0) {
2772 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2773 sizeof (struct bge_tx_bd);
2774 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2775 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2776 tosync = -tosync;
2777 }
2778
2779 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2780 offset, tosync * sizeof (struct bge_tx_bd),
2781 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2782
2783 /*
2784 * Go through our tx ring and free mbufs for those
2785 * frames that have been sent.
2786 */
2787 while (sc->bge_tx_saved_considx !=
2788 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2789 u_int32_t idx = 0;
2790
2791 idx = sc->bge_tx_saved_considx;
2792 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2793 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2794 ifp->if_opackets++;
2795 m = sc->bge_cdata.bge_tx_chain[idx];
2796 if (m != NULL) {
2797 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2798 dma = sc->txdma[idx];
2799 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2800 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2801 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2802 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2803 sc->txdma[idx] = NULL;
2804
2805 m_freem(m);
2806 }
2807 sc->bge_txcnt--;
2808 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2809 ifp->if_timer = 0;
2810 }
2811
2812 if (cur_tx != NULL)
2813 ifp->if_flags &= ~IFF_OACTIVE;
2814 }
2815
2816 int
2817 bge_intr(xsc)
2818 void *xsc;
2819 {
2820 struct bge_softc *sc;
2821 struct ifnet *ifp;
2822
2823 sc = xsc;
2824 ifp = &sc->ethercom.ec_if;
2825
2826 #ifdef notdef
2827 /* Avoid this for now -- checking this register is expensive. */
2828 /* Make sure this is really our interrupt. */
2829 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2830 return (0);
2831 #endif
2832 	/* Ack interrupt and stop others from occurring. */
2833 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2834
2835 /*
2836 * Process link state changes.
2837 * Grrr. The link status word in the status block does
2838 * not work correctly on the BCM5700 rev AX and BX chips,
2839 	 * according to all available information. Hence, we have
2840 * to enable MII interrupts in order to properly obtain
2841 * async link changes. Unfortunately, this also means that
2842 * we have to read the MAC status register to detect link
2843 * changes, thereby adding an additional register access to
2844 * the interrupt handler.
2845 */
2846
2847 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2848 u_int32_t status;
2849
2850 status = CSR_READ_4(sc, BGE_MAC_STS);
2851 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2852 sc->bge_link = 0;
2853 callout_stop(&sc->bge_timeout);
2854 bge_tick(sc);
2855 /* Clear the interrupt */
2856 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2857 BGE_EVTENB_MI_INTERRUPT);
2858 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2859 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2860 BRGPHY_INTRS);
2861 }
2862 } else {
2863 if (sc->bge_rdata->bge_status_block.bge_status &
2864 BGE_STATFLAG_LINKSTATE_CHANGED) {
2865 sc->bge_link = 0;
2866 callout_stop(&sc->bge_timeout);
2867 bge_tick(sc);
2868 /* Clear the interrupt */
2869 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2870 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2871 BGE_MACSTAT_LINK_CHANGED);
2872 }
2873 }
2874
2875 if (ifp->if_flags & IFF_RUNNING) {
2876 /* Check RX return ring producer/consumer */
2877 bge_rxeof(sc);
2878
2879 /* Check TX ring producer/consumer */
2880 bge_txeof(sc);
2881 }
2882
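	/*
	 * If the RX interrupt coalescing parameters were changed
	 * (e.g. via the hw.bge.rx_lvl sysctl), program the new values
	 * into the host coalescing block, reading each register back
	 * after a short delay.
	 */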
2883 if (sc->bge_pending_rxintr_change) {
2884 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
2885 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
2886 uint32_t junk;
2887
2888 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
2889 DELAY(10);
2890 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
2891
2892 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
2893 DELAY(10);
2894 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
2895
2896 sc->bge_pending_rxintr_change = 0;
2897 }
2898 bge_handle_events(sc);
2899
2900 /* Re-enable interrupts. */
2901 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2902
2903 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2904 bge_start(ifp);
2905
2906 return (1);
2907 }
2908
2909 void
2910 bge_tick(xsc)
2911 void *xsc;
2912 {
2913 struct bge_softc *sc = xsc;
2914 struct mii_data *mii = &sc->bge_mii;
2915 struct ifmedia *ifm = NULL;
2916 struct ifnet *ifp = &sc->ethercom.ec_if;
2917 int s;
2918
2919 s = splnet();
2920
2921 bge_stats_update(sc);
2922 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2923 if (sc->bge_link) {
2924 splx(s);
2925 return;
2926 }
2927
2928 if (sc->bge_tbi) {
2929 ifm = &sc->bge_ifmedia;
2930 if (CSR_READ_4(sc, BGE_MAC_STS) &
2931 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2932 sc->bge_link++;
2933 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2934 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2935 bge_start(ifp);
2936 }
2937 splx(s);
2938 return;
2939 }
2940
2941 mii_tick(mii);
2942
2943 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2944 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2945 sc->bge_link++;
2946 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2947 bge_start(ifp);
2948 }
2949
2950 splx(s);
2951 }
2952
2953 void
2954 bge_stats_update(sc)
2955 struct bge_softc *sc;
2956 {
2957 struct ifnet *ifp = &sc->ethercom.ec_if;
2958 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2959 bus_size_t rstats = BGE_RX_STATS;
2960
2961 #define READ_RSTAT(sc, stats, stat) \
2962 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
2963
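	/*
	 * 5705-core chips export their counters through individual MAC
	 * statistics registers; older chips keep a statistics block in
	 * NIC memory, read here through the memory window (only the low
	 * 32 bits of each counter are used).
	 */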
2964 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
2965 ifp->if_collisions +=
2966 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
2967 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
2968 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
2969 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
2970 return;
2971 }
2972
2973 #undef READ_RSTAT
2974 #define READ_STAT(sc, stats, stat) \
2975 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2976
2977 ifp->if_collisions +=
2978 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
2979 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2980 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
2981 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
2982 ifp->if_collisions;
2983
2984 #undef READ_STAT
2985
2986 #ifdef notdef
2987 ifp->if_collisions +=
2988 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2989 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2990 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2991 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2992 ifp->if_collisions;
2993 #endif
2994 }
2995
2996 /*
2997 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2998 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2999 * but when such padded frames employ the bge IP/TCP checksum offload,
3000 * the hardware checksum assist gives incorrect results (possibly
3001 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3002 * If we pad such runts with zeros, the onboard checksum comes out correct.
3003 */
3004 static __inline int
3005 bge_cksum_pad(struct mbuf *pkt)
3006 {
3007 struct mbuf *last = NULL;
3008 int padlen;
3009
3010 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3011
3012 /* if there's only the packet-header and we can pad there, use it. */
3013 if (pkt->m_pkthdr.len == pkt->m_len &&
3014 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
3015 last = pkt;
3016 } else {
3017 /*
3018 * Walk packet chain to find last mbuf. We will either
3019 * pad there, or append a new mbuf and pad it
3020 * (thus perhaps avoiding the bcm5700 dma-min bug).
3021 */
3022 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3023 			(void) 0; /* do nothing */
3024 }
3025
3026 /* `last' now points to last in chain. */
3027 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
3028 (void) 0; /* we can pad here, in-place. */
3029 } else {
3030 /* Allocate new empty mbuf, pad it. Compact later. */
3031 struct mbuf *n;
3032 			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;
3033 			n->m_len = 0;
3034 last->m_next = n;
3035 last = n;
3036 }
3037 }
3038
3039 #ifdef DEBUG
3040 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
3041 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
3042 #endif
3043 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3044 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
3045 last->m_len += padlen;
3046 pkt->m_pkthdr.len += padlen;
3047 return 0;
3048 }
3049
3050 /*
3051  * Compact outbound packets to avoid a bug with DMA segments shorter than 8 bytes.
3052 */
3053 static __inline int
3054 bge_compact_dma_runt(struct mbuf *pkt)
3055 {
3056 struct mbuf *m, *prev;
3057 int totlen, prevlen;
3058
3059 prev = NULL;
3060 totlen = 0;
3061 prevlen = -1;
3062
3063 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3064 		int mlen = m->m_len;
3065 		int shortfall = 8 - mlen;
3066
3067 totlen += mlen;
3068 if (mlen == 0) {
3069 continue;
3070 }
3071 if (mlen >= 8)
3072 continue;
3073
3074 /* If we get here, mbuf data is too small for DMA engine.
3075 * Try to fix by shuffling data to prev or next in chain.
3076 * If that fails, do a compacting deep-copy of the whole chain.
3077 */
3078
3079 		/* Internal fragmentation. If it fits in prev, copy it there. */
3080 if (prev && !M_READONLY(prev) &&
3081 M_TRAILINGSPACE(prev) >= m->m_len) {
3082 bcopy(m->m_data,
3083 prev->m_data+prev->m_len,
3084 mlen);
3085 prev->m_len += mlen;
3086 m->m_len = 0;
3087 /* XXX stitch chain */
3088 prev->m_next = m_free(m);
3089 m = prev;
3090 continue;
3091 }
3092 else if (m->m_next != NULL && !M_READONLY(m) &&
3093 M_TRAILINGSPACE(m) >= shortfall &&
3094 m->m_next->m_len >= (8 + shortfall)) {
3095 			/* m is writable and next has enough data; pull some up. */
3096
3097 bcopy(m->m_next->m_data,
3098 m->m_data+m->m_len,
3099 shortfall);
3100 m->m_len += shortfall;
3101 m->m_next->m_len -= shortfall;
3102 m->m_next->m_data += shortfall;
3103 }
3104 else if (m->m_next == NULL || 1) {
3105 /* Got a runt at the very end of the packet.
3106 			 * Borrow data from the tail of the preceding mbuf and
3107 * update its length in-place. (The original data is still
3108 * valid, so we can do this even if prev is not writable.)
3109 */
3110
3111 /* if we'd make prev a runt, just move all of its data. */
3112 #ifdef DEBUG
3113 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3114 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3115 #endif
3116 if ((prev->m_len - shortfall) < 8)
3117 shortfall = prev->m_len;
3118
3119 #ifdef notyet /* just do the safe slow thing for now */
3120 if (!M_READONLY(m)) {
3121 				if (M_LEADINGSPACE(m) < shortfall) {
3122 					void *m_dat;
3123 					m_dat = (m->m_flags & M_PKTHDR) ?
3124 					  m->m_pktdat : m->m_dat;
3125 memmove(m_dat, mtod(m, void*), m->m_len);
3126 m->m_data = m_dat;
3127 }
3128 } else
3129 #endif /* just do the safe slow thing */
3130 {
3131 struct mbuf * n = NULL;
3132 int newprevlen = prev->m_len - shortfall;
3133
3134 MGET(n, M_NOWAIT, MT_DATA);
3135 if (n == NULL)
3136 return ENOBUFS;
3137 KASSERT(m->m_len + shortfall < MLEN
3138 /*,
3139 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3140
3141 /* first copy the data we're stealing from prev */
3142 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
3143
3144 /* update prev->m_len accordingly */
3145 prev->m_len -= shortfall;
3146
3147 /* copy data from runt m */
3148 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
3149
3150 /* n holds what we stole from prev, plus m */
3151 n->m_len = shortfall + m->m_len;
3152
3153 /* stitch n into chain and free m */
3154 n->m_next = m->m_next;
3155 prev->m_next = n;
3156 /* KASSERT(m->m_next == NULL); */
3157 m->m_next = NULL;
3158 m_free(m);
3159 m = n; /* for continuing loop */
3160 }
3161 }
3162 prevlen = m->m_len;
3163 }
3164 return 0;
3165 }
3166
3167 /*
3168 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3169 * pointers to descriptors.
3170 */
3171 int
3172 bge_encap(sc, m_head, txidx)
3173 struct bge_softc *sc;
3174 struct mbuf *m_head;
3175 u_int32_t *txidx;
3176 {
3177 struct bge_tx_bd *f = NULL;
3178 u_int32_t frag, cur, cnt = 0;
3179 u_int16_t csum_flags = 0;
3180 struct txdmamap_pool_entry *dma;
3181 bus_dmamap_t dmamap;
3182 int i = 0;
3183 struct m_tag *mtag;
3184
3185 cur = frag = *txidx;
3186
3187 if (m_head->m_pkthdr.csum_flags) {
3188 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3189 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3190 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3191 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3192 }
3193
3194 /*
3195 * If we were asked to do an outboard checksum, and the NIC
3196 * has the bug where it sometimes adds in the Ethernet padding,
3197 * explicitly pad with zeros so the cksum will be correct either way.
3198 * (For now, do this for all chip versions, until newer
3199 	 * revisions are confirmed not to require the workaround.)
3200 */
3201 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3202 #ifdef notyet
3203 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3204 #endif
3205 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3206 goto check_dma_bug;
3207
3208 if (bge_cksum_pad(m_head) != 0)
3209 return ENOBUFS;
3210
3211 check_dma_bug:
3212 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
3213 goto doit;
3214 /*
3215 	 * BCM5700 Revision B silicon cannot handle DMA descriptors that
3216 	 * cover fewer than eight bytes. If we encounter a teeny mbuf
3217 * at the end of a chain, we can pad. Otherwise, copy.
3218 */
3219 if (bge_compact_dma_runt(m_head) != 0)
3220 return ENOBUFS;
3221
3222 doit:
3223 dma = SLIST_FIRST(&sc->txdma_list);
3224 if (dma == NULL)
3225 return ENOBUFS;
3226 dmamap = dma->dmamap;
3227
3228 /*
3229 * Start packing the mbufs in this chain into
3230 * the fragment pointers. Stop when we run out
3231 * of fragments or hit the end of the mbuf chain.
3232 */
3233 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3234 BUS_DMA_NOWAIT))
3235 return(ENOBUFS);
3236
3237 mtag = sc->ethercom.ec_nvlans ?
3238 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3239
3240 for (i = 0; i < dmamap->dm_nsegs; i++) {
3241 f = &sc->bge_rdata->bge_tx_ring[frag];
3242 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3243 break;
3244 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3245 f->bge_len = dmamap->dm_segs[i].ds_len;
3246 f->bge_flags = csum_flags;
3247
3248 if (mtag != NULL) {
3249 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3250 f->bge_vlan_tag = *(u_int *)(mtag + 1);
3251 } else {
3252 f->bge_vlan_tag = 0;
3253 }
3254 /*
3255 * Sanity check: avoid coming within 16 descriptors
3256 * of the end of the ring.
3257 */
3258 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16)
3259 return(ENOBUFS);
3260 cur = frag;
3261 BGE_INC(frag, BGE_TX_RING_CNT);
3262 cnt++;
3263 }
3264
3265 if (i < dmamap->dm_nsegs)
3266 return ENOBUFS;
3267
3268 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3269 BUS_DMASYNC_PREWRITE);
3270
3271 if (frag == sc->bge_tx_saved_considx)
3272 return(ENOBUFS);
3273
3274 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3275 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3276 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3277 sc->txdma[cur] = dma;
3278 sc->bge_txcnt += cnt;
3279
3280 *txidx = frag;
3281
3282 return(0);
3283 }
3284
3285 /*
3286 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3287 * to the mbuf data regions directly in the transmit descriptors.
3288 */
3289 void
3290 bge_start(ifp)
3291 struct ifnet *ifp;
3292 {
3293 struct bge_softc *sc;
3294 struct mbuf *m_head = NULL;
3295 u_int32_t prodidx = 0;
3296 int pkts = 0;
3297
3298 sc = ifp->if_softc;
3299
3300 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3301 return;
3302
3303 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3304
3305 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3306 IFQ_POLL(&ifp->if_snd, m_head);
3307 if (m_head == NULL)
3308 break;
3309
3310 #if 0
3311 /*
3312 * XXX
3313 * safety overkill. If this is a fragmented packet chain
3314 * with delayed TCP/UDP checksums, then only encapsulate
3315 * it if we have enough descriptors to handle the entire
3316 * chain at once.
3317 * (paranoia -- may not actually be needed)
3318 */
3319 if (m_head->m_flags & M_FIRSTFRAG &&
3320 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3321 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3322 m_head->m_pkthdr.csum_data + 16) {
3323 ifp->if_flags |= IFF_OACTIVE;
3324 break;
3325 }
3326 }
3327 #endif
3328
3329 /*
3330 * Pack the data into the transmit ring. If we
3331 * don't have room, set the OACTIVE flag and wait
3332 * for the NIC to drain the ring.
3333 */
3334 if (bge_encap(sc, m_head, &prodidx)) {
3335 ifp->if_flags |= IFF_OACTIVE;
3336 break;
3337 }
3338
3339 /* now we are committed to transmit the packet */
3340 IFQ_DEQUEUE(&ifp->if_snd, m_head);
3341 pkts++;
3342
3343 #if NBPFILTER > 0
3344 /*
3345 * If there's a BPF listener, bounce a copy of this frame
3346 * to him.
3347 */
3348 if (ifp->if_bpf)
3349 bpf_mtap(ifp->if_bpf, m_head);
3350 #endif
3351 }
3352 if (pkts == 0)
3353 return;
3354
3355 /* Transmit */
3356 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3357 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
3358 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3359
3360 /*
3361 * Set a timeout in case the chip goes out to lunch.
3362 */
3363 ifp->if_timer = 5;
3364 }
3365
3366 int
3367 bge_init(ifp)
3368 struct ifnet *ifp;
3369 {
3370 struct bge_softc *sc = ifp->if_softc;
3371 u_int16_t *m;
3372 int s, error;
3373
3374 s = splnet();
3375
3376 ifp = &sc->ethercom.ec_if;
3377
3378 /* Cancel pending I/O and flush buffers. */
3379 bge_stop(sc);
3380 bge_reset(sc);
3381 bge_chipinit(sc);
3382
3383 /*
3384 * Init the various state machines, ring
3385 * control blocks and firmware.
3386 */
3387 error = bge_blockinit(sc);
3388 if (error != 0) {
3389 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
3390 error);
3391 splx(s);
3392 return error;
3393 }
3394
3395 ifp = &sc->ethercom.ec_if;
3396
3397 /* Specify MTU. */
3398 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3399 ETHER_HDR_LEN + ETHER_CRC_LEN);
3400
3401 /* Load our MAC address. */
3402 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
3403 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3404 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3405
3406 /* Enable or disable promiscuous mode as needed. */
3407 if (ifp->if_flags & IFF_PROMISC) {
3408 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3409 } else {
3410 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3411 }
3412
3413 /* Program multicast filter. */
3414 bge_setmulti(sc);
3415
3416 /* Init RX ring. */
3417 bge_init_rx_ring_std(sc);
3418
3419 /* Init jumbo RX ring. */
3420 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3421 bge_init_rx_ring_jumbo(sc);
3422
3423 /* Init our RX return ring index */
3424 sc->bge_rx_saved_considx = 0;
3425
3426 /* Init TX ring. */
3427 bge_init_tx_ring(sc);
3428
3429 /* Turn on transmitter */
3430 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3431
3432 /* Turn on receiver */
3433 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3434
3435 /* Tell firmware we're alive. */
3436 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3437
3438 /* Enable host interrupts. */
3439 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3440 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3441 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3442
3443 bge_ifmedia_upd(ifp);
3444
3445 ifp->if_flags |= IFF_RUNNING;
3446 ifp->if_flags &= ~IFF_OACTIVE;
3447
3448 splx(s);
3449
3450 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3451
3452 return 0;
3453 }
3454
3455 /*
3456 * Set media options.
3457 */
3458 int
3459 bge_ifmedia_upd(ifp)
3460 struct ifnet *ifp;
3461 {
3462 struct bge_softc *sc = ifp->if_softc;
3463 struct mii_data *mii = &sc->bge_mii;
3464 struct ifmedia *ifm = &sc->bge_ifmedia;
3465
3466 /* If this is a 1000baseX NIC, enable the TBI port. */
3467 if (sc->bge_tbi) {
3468 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3469 return(EINVAL);
3470 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3471 case IFM_AUTO:
3472 break;
3473 case IFM_1000_SX:
3474 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3475 BGE_CLRBIT(sc, BGE_MAC_MODE,
3476 BGE_MACMODE_HALF_DUPLEX);
3477 } else {
3478 BGE_SETBIT(sc, BGE_MAC_MODE,
3479 BGE_MACMODE_HALF_DUPLEX);
3480 }
3481 break;
3482 default:
3483 return(EINVAL);
3484 }
3485 return(0);
3486 }
3487
3488 sc->bge_link = 0;
3489 mii_mediachg(mii);
3490
3491 return(0);
3492 }
3493
3494 /*
3495 * Report current media status.
3496 */
3497 void
3498 bge_ifmedia_sts(ifp, ifmr)
3499 struct ifnet *ifp;
3500 struct ifmediareq *ifmr;
3501 {
3502 struct bge_softc *sc = ifp->if_softc;
3503 struct mii_data *mii = &sc->bge_mii;
3504
3505 if (sc->bge_tbi) {
3506 ifmr->ifm_status = IFM_AVALID;
3507 ifmr->ifm_active = IFM_ETHER;
3508 if (CSR_READ_4(sc, BGE_MAC_STS) &
3509 BGE_MACSTAT_TBI_PCS_SYNCHED)
3510 ifmr->ifm_status |= IFM_ACTIVE;
3511 ifmr->ifm_active |= IFM_1000_SX;
3512 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3513 ifmr->ifm_active |= IFM_HDX;
3514 else
3515 ifmr->ifm_active |= IFM_FDX;
3516 return;
3517 }
3518
3519 mii_pollstat(mii);
3520 ifmr->ifm_active = mii->mii_media_active;
3521 ifmr->ifm_status = mii->mii_media_status;
3522 }
3523
3524 int
3525 bge_ioctl(ifp, command, data)
3526 struct ifnet *ifp;
3527 u_long command;
3528 caddr_t data;
3529 {
3530 struct bge_softc *sc = ifp->if_softc;
3531 struct ifreq *ifr = (struct ifreq *) data;
3532 int s, error = 0;
3533 struct mii_data *mii;
3534
3535 s = splnet();
3536
3537 switch(command) {
3538 case SIOCSIFFLAGS:
3539 if (ifp->if_flags & IFF_UP) {
3540 /*
3541 * If only the state of the PROMISC flag changed,
3542 * then just use the 'set promisc mode' command
3543 * instead of reinitializing the entire NIC. Doing
3544 * a full re-init means reloading the firmware and
3545 * waiting for it to start up, which may take a
3546 * second or two.
3547 */
3548 if (ifp->if_flags & IFF_RUNNING &&
3549 ifp->if_flags & IFF_PROMISC &&
3550 !(sc->bge_if_flags & IFF_PROMISC)) {
3551 BGE_SETBIT(sc, BGE_RX_MODE,
3552 BGE_RXMODE_RX_PROMISC);
3553 } else if (ifp->if_flags & IFF_RUNNING &&
3554 !(ifp->if_flags & IFF_PROMISC) &&
3555 sc->bge_if_flags & IFF_PROMISC) {
3556 BGE_CLRBIT(sc, BGE_RX_MODE,
3557 BGE_RXMODE_RX_PROMISC);
3558 } else
3559 bge_init(ifp);
3560 } else {
3561 if (ifp->if_flags & IFF_RUNNING) {
3562 bge_stop(sc);
3563 }
3564 }
3565 sc->bge_if_flags = ifp->if_flags;
3566 error = 0;
3567 break;
3568 case SIOCSIFMEDIA:
3569 case SIOCGIFMEDIA:
3570 if (sc->bge_tbi) {
3571 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3572 command);
3573 } else {
3574 mii = &sc->bge_mii;
3575 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3576 command);
3577 }
3579 break;
3580 default:
3581 error = ether_ioctl(ifp, command, data);
3582 if (error == ENETRESET) {
3583 bge_setmulti(sc);
3584 error = 0;
3585 }
3586 break;
3587 }
3588
3589 splx(s);
3590
3591 return(error);
3592 }
3593
3594 void
3595 bge_watchdog(ifp)
3596 struct ifnet *ifp;
3597 {
3598 struct bge_softc *sc;
3599
3600 sc = ifp->if_softc;
3601
3602 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3603
3604 ifp->if_flags &= ~IFF_RUNNING;
3605 bge_init(ifp);
3606
3607 ifp->if_oerrors++;
3608 }
3609
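/*
 * Clear the enable bit of the given block and busy-wait until the
 * hardware reports that the block has actually stopped.
 */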
3610 static void
3611 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
3612 {
3613 int i;
3614
3615 BGE_CLRBIT(sc, reg, bit);
3616
3617 for (i = 0; i < BGE_TIMEOUT; i++) {
3618 if ((CSR_READ_4(sc, reg) & bit) == 0)
3619 return;
3620 delay(100);
3621 }
3622
3623 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3624 sc->bge_dev.dv_xname, (u_long) reg, bit);
3625 }
3626
3627 /*
3628 * Stop the adapter and free any mbufs allocated to the
3629 * RX and TX lists.
3630 */
3631 void
3632 bge_stop(sc)
3633 struct bge_softc *sc;
3634 {
3635 struct ifnet *ifp = &sc->ethercom.ec_if;
3636
3637 callout_stop(&sc->bge_timeout);
3638
3639 /*
3640 * Disable all of the receiver blocks
3641 */
3642 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3643 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3644 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3645 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3646 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3647 }
3648 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3649 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3650 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3651
3652 /*
3653 * Disable all of the transmit blocks
3654 */
3655 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3656 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3657 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3658 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3659 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3660 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3661 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3662 }
3663 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3664
3665 /*
3666 * Shut down all of the memory managers and related
3667 * state machines.
3668 */
3669 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3670 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3671 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3672 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3673 }
3674
3675 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3676 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3677
3678 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3679 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3680 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3681 }
3682
3683 /* Disable host interrupts. */
3684 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3685 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3686
3687 /*
3688 * Tell firmware we're shutting down.
3689 */
3690 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3691
3692 /* Free the RX lists. */
3693 bge_free_rx_ring_std(sc);
3694
3695 /* Free jumbo RX list. */
3696 bge_free_rx_ring_jumbo(sc);
3697
3698 /* Free TX buffers. */
3699 bge_free_tx_ring(sc);
3700
3701 /*
3702 * Isolate/power down the PHY.
3703 */
3704 if (!sc->bge_tbi)
3705 mii_down(&sc->bge_mii);
3706
3707 sc->bge_link = 0;
3708
3709 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3710
3711 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3712 }
3713
3714 /*
3715 * Stop all chip I/O so that the kernel's probe routines don't
3716 * get confused by errant DMAs when rebooting.
3717 */
3718 void
3719 bge_shutdown(xsc)
3720 void *xsc;
3721 {
3722 struct bge_softc *sc = (struct bge_softc *)xsc;
3723
3724 bge_stop(sc);
3725 bge_reset(sc);
3726 }
3727
3728
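/*
 * Sysctl handler for hw.bge.rx_lvl: validate the requested RX
 * interrupt mitigation level and apply it to all bge interfaces.
 */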
3729 static int
3730 sysctl_bge_verify(SYSCTLFN_ARGS)
3731 {
3732 int error, t;
3733 struct sysctlnode node;
3734
3735 node = *rnode;
3736 t = *(int*)rnode->sysctl_data;
3737 node.sysctl_data = &t;
3738 error = sysctl_lookup(SYSCTLFN_CALL(&node));
3739 if (error || newp == NULL)
3740 return (error);
3741
3742 #if 0
3743 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
3744 node.sysctl_num, rnode->sysctl_num));
3745 #endif
3746
3747 if (node.sysctl_num == bge_rxthresh_nodenum) {
3748 if (t < 0 || t >= NBGE_RX_THRESH)
3749 return (EINVAL);
3750 bge_update_all_threshes(t);
3751 } else
3752 return (EINVAL);
3753
3754 *(int*)rnode->sysctl_data = t;
3755
3756 return (0);
3757 }
3758
3759 /*
3760 * Setup sysctl(3) MIB, hw.bge.*.
3761 *
3762 * TBD condition SYSCTL_PERMANENT on being an LKM or not
3763 */
3764 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
3765 {
3766 int rc;
3767 struct sysctlnode *node;
3768
3769 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
3770 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
3771 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
3772 goto err;
3773 }
3774
3775 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3776 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge", NULL,
3777 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
3778 goto err;
3779 }
3780
3781 bge_node_root = node;
3782
3783 /* BGE Rx interrupt mitigation level */
3784 if ((rc = sysctl_createv(clog, 0, NULL, &node,
3785 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
3786 CTLTYPE_INT, "rx_lvl", NULL, sysctl_bge_verify, 0,
3787 &bge_rx_thresh_lvl,
3788 0, CTL_HW, bge_node_root->sysctl_num, CTL_CREATE,
3789 CTL_EOL)) != 0) {
3790 goto err;
3791 }
3792
3793 bge_rxthresh_nodenum = node->sysctl_num;
3794 node = NULL;
3795
3796 return;
3797
3798 err:
3799 printf("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
3800 }
3801