1 /* $NetBSD: if_bge.c,v 1.48 2003/08/26 10:17:02 hannken Exp $ */
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl (at) wasabisystems.com>
44 * Jason Thorpe <thorpej (at) wasabisystems.com>
45 * Jonathan Stone <jonathan (at) dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.48 2003/08/26 10:17:02 hannken Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/callout.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/device.h>
95 #include <sys/socket.h>
96
97 #include <net/if.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_ether.h>
101
102 #ifdef INET
103 #include <netinet/in.h>
104 #include <netinet/in_systm.h>
105 #include <netinet/in_var.h>
106 #include <netinet/ip.h>
107 #endif
108
109 #if NBPFILTER > 0
110 #include <net/bpf.h>
111 #endif
112
113 #include <dev/pci/pcireg.h>
114 #include <dev/pci/pcivar.h>
115 #include <dev/pci/pcidevs.h>
116
117 #include <dev/mii/mii.h>
118 #include <dev/mii/miivar.h>
119 #include <dev/mii/miidevs.h>
120 #include <dev/mii/brgphyreg.h>
121
122 #include <dev/pci/if_bgereg.h>
123
124 #include <uvm/uvm_extern.h>
125
126 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
127
128 int bge_probe(struct device *, struct cfdata *, void *);
129 void bge_attach(struct device *, struct device *, void *);
130 void bge_release_resources(struct bge_softc *);
131 void bge_txeof(struct bge_softc *);
132 void bge_rxeof(struct bge_softc *);
133
134 void bge_tick(void *);
135 void bge_stats_update(struct bge_softc *);
136 int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
137 static __inline int bge_cksum_pad(struct mbuf *pkt);
138 static __inline int bge_compact_dma_runt(struct mbuf *pkt);
139
140 int bge_intr(void *);
141 void bge_start(struct ifnet *);
142 int bge_ioctl(struct ifnet *, u_long, caddr_t);
143 int bge_init(struct ifnet *);
144 void bge_stop(struct bge_softc *);
145 void bge_watchdog(struct ifnet *);
146 void bge_shutdown(void *);
147 int bge_ifmedia_upd(struct ifnet *);
148 void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
149
150 u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
151 int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);
152
153 void bge_setmulti(struct bge_softc *);
154
155 void bge_handle_events(struct bge_softc *);
156 int bge_alloc_jumbo_mem(struct bge_softc *);
157 void bge_free_jumbo_mem(struct bge_softc *);
158 void *bge_jalloc(struct bge_softc *);
159 void bge_jfree(struct mbuf *, caddr_t, size_t, void *);
160 int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
161 int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
162 int bge_init_rx_ring_std(struct bge_softc *);
163 void bge_free_rx_ring_std(struct bge_softc *);
164 int bge_init_rx_ring_jumbo(struct bge_softc *);
165 void bge_free_rx_ring_jumbo(struct bge_softc *);
166 void bge_free_tx_ring(struct bge_softc *);
167 int bge_init_tx_ring(struct bge_softc *);
168
169 int bge_chipinit(struct bge_softc *);
170 int bge_blockinit(struct bge_softc *);
171 int bge_setpowerstate(struct bge_softc *, int);
172
173 #ifdef notdef
174 u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
175 void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
176 void bge_vpd_read(struct bge_softc *);
177 #endif
178
179 u_int32_t bge_readmem_ind(struct bge_softc *, int);
180 void bge_writemem_ind(struct bge_softc *, int, int);
181 #ifdef notdef
182 u_int32_t bge_readreg_ind(struct bge_softc *, int);
183 #endif
184 void bge_writereg_ind(struct bge_softc *, int, int);
185
186 int bge_miibus_readreg(struct device *, int, int);
187 void bge_miibus_writereg(struct device *, int, int, int);
188 void bge_miibus_statchg(struct device *);
189
190 void bge_reset(struct bge_softc *);
191
192 void bge_dump_status(struct bge_softc *);
193 void bge_dump_rxbd(struct bge_rx_bd *);
194
195 #define BGE_DEBUG
196 #ifdef BGE_DEBUG
197 #define DPRINTF(x) if (bgedebug) printf x
198 #define DPRINTFN(n,x) if (bgedebug >= (n)) printf x
199 int bgedebug = 0;
200 #else
201 #define DPRINTF(x)
202 #define DPRINTFN(n,x)
203 #endif
204
205 /* Various chip quirks. */
206 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001
207 #define BGE_QUIRK_CSUM_BROKEN 0x00000002
208 #define BGE_QUIRK_ONLY_PHY_1 0x00000004
209 #define BGE_QUIRK_5700_SMALLDMA 0x00000008
210 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010
211 #define BGE_QUIRK_PRODUCER_BUG 0x00000020
212 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040
213 #define BGE_QUIRK_5705_CORE 0x00000080
214
215 /* following bugs are common to bcm5700 rev B, all flavours */
216 #define BGE_QUIRK_5700_COMMON \
217 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
218
219 CFATTACH_DECL(bge, sizeof(struct bge_softc),
220 bge_probe, bge_attach, NULL, NULL);
221
222 u_int32_t
223 bge_readmem_ind(sc, off)
224 struct bge_softc *sc;
225 int off;
226 {
227 struct pci_attach_args *pa = &(sc->bge_pa);
228 pcireg_t val;
229
230 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
231 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
232 return val;
233 }
234
235 void
236 bge_writemem_ind(sc, off, val)
237 struct bge_softc *sc;
238 int off, val;
239 {
240 struct pci_attach_args *pa = &(sc->bge_pa);
241
242 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
243 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
244 }
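
/*
 * Editor's usage sketch (not built; uses only symbols already present
 * in this file): the two functions above implement the standard
 * memory-window indirection -- write the NIC-RAM offset to
 * BGE_PCI_MEMWIN_BASEADDR, then move data through BGE_PCI_MEMWIN_DATA.
 * For example, snapshotting the first word of the statistics block
 * without going through BAR0 could look like:
 */
#ifdef notdef
static u_int32_t
bge_stats_word_sketch(struct bge_softc *sc)
{

	/* BGE_STATS_BLOCK is the NIC-RAM offset cleared in bge_chipinit() */
	return (bge_readmem_ind(sc, BGE_STATS_BLOCK));
}
#endif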
245
246 #ifdef notdef
247 u_int32_t
248 bge_readreg_ind(sc, off)
249 struct bge_softc *sc;
250 int off;
251 {
252 struct pci_attach_args *pa = &(sc->bge_pa);
253
254 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
255 return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
256 }
257 #endif
258
259 void
260 bge_writereg_ind(sc, off, val)
261 struct bge_softc *sc;
262 int off, val;
263 {
264 struct pci_attach_args *pa = &(sc->bge_pa);
265
266 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
267 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
268 }
269
270 #ifdef notdef
271 u_int8_t
272 bge_vpd_readbyte(sc, addr)
273 struct bge_softc *sc;
274 int addr;
275 {
276 int i;
277 u_int32_t val;
278 struct pci_attach_args *pa = &(sc->bge_pa);
279
280 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
281 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
282 DELAY(10);
283 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
284 BGE_VPD_FLAG)
285 break;
286 }
287
288 if (i == BGE_TIMEOUT * 10) {
289 printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
290 return(0);
291 }
292
293 val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);
294
295 return((val >> ((addr % 4) * 8)) & 0xFF);
296 }
297
298 void
299 bge_vpd_read_res(sc, res, addr)
300 struct bge_softc *sc;
301 struct vpd_res *res;
302 int addr;
303 {
304 int i;
305 u_int8_t *ptr;
306
307 ptr = (u_int8_t *)res;
308 for (i = 0; i < sizeof(struct vpd_res); i++)
309 ptr[i] = bge_vpd_readbyte(sc, i + addr);
310 }
311
312 void
313 bge_vpd_read(sc)
314 struct bge_softc *sc;
315 {
316 int pos = 0, i;
317 struct vpd_res res;
318
319 if (sc->bge_vpd_prodname != NULL)
320 free(sc->bge_vpd_prodname, M_DEVBUF);
321 if (sc->bge_vpd_readonly != NULL)
322 free(sc->bge_vpd_readonly, M_DEVBUF);
323 sc->bge_vpd_prodname = NULL;
324 sc->bge_vpd_readonly = NULL;
325
326 bge_vpd_read_res(sc, &res, pos);
327
328 if (res.vr_id != VPD_RES_ID) {
329 printf("%s: bad VPD resource id: expected %x got %x\n",
330 sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
331 return;
332 }
333
334 pos += sizeof(res);
335 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
336 if (sc->bge_vpd_prodname == NULL)
337 panic("bge_vpd_read");
338 for (i = 0; i < res.vr_len; i++)
339 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
340 sc->bge_vpd_prodname[i] = '\0';
341 pos += i;
342
343 bge_vpd_read_res(sc, &res, pos);
344
345 if (res.vr_id != VPD_RES_READ) {
346 printf("%s: bad VPD resource id: expected %x got %x\n",
347 sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
348 return;
349 }
350
351 pos += sizeof(res);
352 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
353 if (sc->bge_vpd_readonly == NULL)
354 panic("bge_vpd_read");
355 for (i = 0; i < res.vr_len; i++)
356 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
357 }
358 #endif
359
360 /*
361 * Read a byte of data stored in the EEPROM at address 'addr.' The
362 * BCM570x supports both the traditional bitbang interface and an
363 * auto access interface for reading the EEPROM. We use the auto
364 * access method.
365 */
366 u_int8_t
367 bge_eeprom_getbyte(sc, addr, dest)
368 struct bge_softc *sc;
369 int addr;
370 u_int8_t *dest;
371 {
372 int i;
373 u_int32_t byte = 0;
374
375 /*
376 * Enable use of auto EEPROM access so we can avoid
377 * having to use the bitbang method.
378 */
379 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
380
381 /* Reset the EEPROM, load the clock period. */
382 CSR_WRITE_4(sc, BGE_EE_ADDR,
383 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
384 DELAY(20);
385
386 /* Issue the read EEPROM command. */
387 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
388
389 /* Wait for completion */
390 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
391 DELAY(10);
392 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
393 break;
394 }
395
396 if (i == BGE_TIMEOUT * 10) {
397 printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
398 return(0);
399 }
400
401 /* Get result. */
402 byte = CSR_READ_4(sc, BGE_EE_DATA);
403
404 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
405
406 return(0);
407 }
408
409 /*
410 * Read a sequence of bytes from the EEPROM.
411 */
412 int
413 bge_read_eeprom(sc, dest, off, cnt)
414 struct bge_softc *sc;
415 caddr_t dest;
416 int off;
417 int cnt;
418 {
419 int err = 0, i;
420 u_int8_t byte = 0;
421
422 for (i = 0; i < cnt; i++) {
423 err = bge_eeprom_getbyte(sc, off + i, &byte);
424 if (err)
425 break;
426 *(dest + i) = byte;
427 }
428
429 return(err ? 1 : 0);
430 }
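
/*
 * Editor's usage sketch (not built): a typical consumer of
 * bge_read_eeprom() pulls the 6-byte station address into a local
 * buffer.  BGE_EE_MAC_OFFSET is assumed here to be the usual offset
 * macro from if_bgereg.h; treat both the macro and the +2 skip of the
 * length word as assumptions, not something this file establishes.
 */
#ifdef notdef
static int
bge_get_eaddr_sketch(struct bge_softc *sc, u_char eaddr[ETHER_ADDR_LEN])
{

	return (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN));
}
#endif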
431
432 int
433 bge_miibus_readreg(dev, phy, reg)
434 struct device *dev;
435 int phy, reg;
436 {
437 struct bge_softc *sc = (struct bge_softc *)dev;
438 struct ifnet *ifp;
439 u_int32_t val;
440 u_int32_t saved_autopoll;
441 int i;
442
443 ifp = &sc->ethercom.ec_if;
444
445 /*
446 * Several chips with builtin PHYs will incorrectly answer at
447 * PHY addresses other than that of the builtin PHY at id 1.
448 */
449 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
450 return(0);
451
452 /* Reading with autopolling on may trigger PCI errors */
453 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
454 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
455 CSR_WRITE_4(sc, BGE_MI_MODE,
456 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
457 DELAY(40);
458 }
459
460 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
461 BGE_MIPHY(phy)|BGE_MIREG(reg));
462
463 for (i = 0; i < BGE_TIMEOUT; i++) {
464 val = CSR_READ_4(sc, BGE_MI_COMM);
465 if (!(val & BGE_MICOMM_BUSY))
466 break;
467 delay(10);
468 }
469
470 if (i == BGE_TIMEOUT) {
471 printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
472 val = 0;
473 goto done;
474 }
475
476 val = CSR_READ_4(sc, BGE_MI_COMM);
477
478 done:
479 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
480 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
481 DELAY(40);
482 }
483
484 if (val & BGE_MICOMM_READFAIL)
485 return(0);
486
487 return(val & 0xFFFF);
488 }
489
490 void
491 bge_miibus_writereg(dev, phy, reg, val)
492 struct device *dev;
493 int phy, reg, val;
494 {
495 struct bge_softc *sc = (struct bge_softc *)dev;
496 u_int32_t saved_autopoll;
497 int i;
498
499 /* Touching the PHY while autopolling is on may trigger PCI errors */
500 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
501 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
502 delay(40);
503 CSR_WRITE_4(sc, BGE_MI_MODE,
504 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
505 delay(10); /* 40 usec is supposed to be adequate */
506 }
507
508 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
509 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
510
511 for (i = 0; i < BGE_TIMEOUT; i++) {
512 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
513 break;
514 delay(10);
515 }
516
517 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
518 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
519 delay(40);
520 }
521
522 if (i == BGE_TIMEOUT) {
523 printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
524 }
525 }
526
527 void
528 bge_miibus_statchg(dev)
529 struct device *dev;
530 {
531 struct bge_softc *sc = (struct bge_softc *)dev;
532 struct mii_data *mii = &sc->bge_mii;
533
534 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
535 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
536 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
537 } else {
538 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
539 }
540
541 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
542 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
543 } else {
544 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
545 }
546 }
547
548 /*
549 * Handle events that have triggered interrupts.
550 */
551 void
552 bge_handle_events(sc)
553 struct bge_softc *sc;
554 {
555
556 return;
557 }
558
559 /*
560 * Memory management for jumbo frames.
561 */
562
563 int
564 bge_alloc_jumbo_mem(sc)
565 struct bge_softc *sc;
566 {
567 caddr_t ptr, kva;
568 bus_dma_segment_t seg;
569 int i, rseg, state, error;
570 struct bge_jpool_entry *entry;
571
572 state = error = 0;
573
574 /* Grab a big chunk o' storage. */
575 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
576 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
577 printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
578 return ENOBUFS;
579 }
580
581 state = 1;
582 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
583 BUS_DMA_NOWAIT)) {
584 printf("%s: can't map DMA buffers (%d bytes)\n",
585 sc->bge_dev.dv_xname, (int)BGE_JMEM);
586 error = ENOBUFS;
587 goto out;
588 }
589
590 state = 2;
591 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
592 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
593 printf("%s: can't create DMA map\n", sc->bge_dev.dv_xname);
594 error = ENOBUFS;
595 goto out;
596 }
597
598 state = 3;
599 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
600 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
601 printf("%s: can't load DMA map\n", sc->bge_dev.dv_xname);
602 error = ENOBUFS;
603 goto out;
604 }
605
606 state = 4;
607 sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
608 DPRINTFN(1,("bge_jumbo_buf = 0x%p\n", sc->bge_cdata.bge_jumbo_buf));
609
610 SLIST_INIT(&sc->bge_jfree_listhead);
611 SLIST_INIT(&sc->bge_jinuse_listhead);
612
613 /*
614 * Now divide it up into 9K pieces and save the addresses
615 * in an array.
616 */
617 ptr = sc->bge_cdata.bge_jumbo_buf;
618 for (i = 0; i < BGE_JSLOTS; i++) {
619 sc->bge_cdata.bge_jslots[i] = ptr;
620 ptr += BGE_JLEN;
621 entry = malloc(sizeof(struct bge_jpool_entry),
622 M_DEVBUF, M_NOWAIT);
623 if (entry == NULL) {
624 printf("%s: no memory for jumbo buffer queue!\n",
625 sc->bge_dev.dv_xname);
626 error = ENOBUFS;
627 goto out;
628 }
629 entry->slot = i;
630 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
631 entry, jpool_entries);
632 }
633 out:
634 if (error != 0) {
635 switch (state) {
636 case 4:
637 bus_dmamap_unload(sc->bge_dmatag,
638 sc->bge_cdata.bge_rx_jumbo_map);
639 case 3:
640 bus_dmamap_destroy(sc->bge_dmatag,
641 sc->bge_cdata.bge_rx_jumbo_map);
642 case 2:
643 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
644 case 1:
645 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
646 break;
647 default:
648 break;
649 }
650 }
651
652 return error;
653 }
654
655 /*
656 * Allocate a jumbo buffer.
657 */
658 void *
659 bge_jalloc(sc)
660 struct bge_softc *sc;
661 {
662 struct bge_jpool_entry *entry;
663
664 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
665
666 if (entry == NULL) {
667 printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
668 return(NULL);
669 }
670
671 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
672 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
673 return(sc->bge_cdata.bge_jslots[entry->slot]);
674 }
675
676 /*
677 * Release a jumbo buffer.
678 */
679 void
680 bge_jfree(m, buf, size, arg)
681 struct mbuf *m;
682 caddr_t buf;
683 size_t size;
684 void *arg;
685 {
686 struct bge_jpool_entry *entry;
687 struct bge_softc *sc;
688 int i, s;
689
690 /* Extract the softc struct pointer. */
691 sc = (struct bge_softc *)arg;
692
693 if (sc == NULL)
694 panic("bge_jfree: can't find softc pointer!");
695
696 /* calculate the slot this buffer belongs to */
697
698 i = ((caddr_t)buf
699 - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
700
701 if ((i < 0) || (i >= BGE_JSLOTS))
702 panic("bge_jfree: asked to free buffer that we don't manage!");
703
704 s = splvm();
705 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
706 if (entry == NULL)
707 panic("bge_jfree: buffer not in use!");
708 entry->slot = i;
709 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
710 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
711
712 if (__predict_true(m != NULL))
713 pool_cache_put(&mbpool_cache, m);
714 splx(s);
715 }
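
/*
 * Editor's note with a worked example of the slot arithmetic in
 * bge_jfree() above (sketch only, not built): the jumbo pool is one
 * contiguous allocation of BGE_JSLOTS buffers of BGE_JLEN bytes each,
 * so a buffer address maps back to its slot by pointer subtraction.
 */
#ifdef notdef
static int
bge_jslot_sketch(struct bge_softc *sc, caddr_t buf)
{

	/* e.g. buf == bge_jumbo_buf + 3 * BGE_JLEN  =>  slot 3 */
	return ((buf - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN);
}
#endif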
716
717
718 /*
719 * Initialize a standard receive ring descriptor.
720 */
721 int
722 bge_newbuf_std(sc, i, m, dmamap)
723 struct bge_softc *sc;
724 int i;
725 struct mbuf *m;
726 bus_dmamap_t dmamap;
727 {
728 struct mbuf *m_new = NULL;
729 struct bge_rx_bd *r;
730 int error;
731
732 if (dmamap == NULL) {
733 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
734 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
735 if (error != 0)
736 return error;
737 }
738
739 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
740
741 if (m == NULL) {
742 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
743 if (m_new == NULL) {
744 return(ENOBUFS);
745 }
746
747 MCLGET(m_new, M_DONTWAIT);
748 if (!(m_new->m_flags & M_EXT)) {
749 m_freem(m_new);
750 return(ENOBUFS);
751 }
752 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
753 if (!sc->bge_rx_alignment_bug)
754 m_adj(m_new, ETHER_ALIGN);
755
756 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
757 BUS_DMA_READ|BUS_DMA_NOWAIT))
758 return(ENOBUFS);
759 } else {
760 m_new = m;
761 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
762 m_new->m_data = m_new->m_ext.ext_buf;
763 if (!sc->bge_rx_alignment_bug)
764 m_adj(m_new, ETHER_ALIGN);
765 }
766
767 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
768 r = &sc->bge_rdata->bge_rx_std_ring[i];
769 bge_set_hostaddr(&r->bge_addr,
770 dmamap->dm_segs[0].ds_addr);
771 r->bge_flags = BGE_RXBDFLAG_END;
772 r->bge_len = m_new->m_len;
773 r->bge_idx = i;
774
775 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
776 offsetof(struct bge_ring_data, bge_rx_std_ring) +
777 i * sizeof (struct bge_rx_bd),
778 sizeof (struct bge_rx_bd),
779 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
780
781 return(0);
782 }
783
784 /*
785 * Initialize a jumbo receive ring descriptor. This allocates
786 * a jumbo buffer from the pool managed internally by the driver.
787 */
788 int
789 bge_newbuf_jumbo(sc, i, m)
790 struct bge_softc *sc;
791 int i;
792 struct mbuf *m;
793 {
794 struct mbuf *m_new = NULL;
795 struct bge_rx_bd *r;
796
797 if (m == NULL) {
798 caddr_t *buf = NULL;
799
800 /* Allocate the mbuf. */
801 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
802 if (m_new == NULL) {
803 return(ENOBUFS);
804 }
805
806 /* Allocate the jumbo buffer */
807 buf = bge_jalloc(sc);
808 if (buf == NULL) {
809 m_freem(m_new);
810 printf("%s: jumbo allocation failed "
811 "-- packet dropped!\n", sc->bge_dev.dv_xname);
812 return(ENOBUFS);
813 }
814
815 /* Attach the buffer to the mbuf. */
816 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
817 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
818 bge_jfree, sc);
819 } else {
820 m_new = m;
821 m_new->m_data = m_new->m_ext.ext_buf;
822 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
823 }
824
825 if (!sc->bge_rx_alignment_bug)
826 m_adj(m_new, ETHER_ALIGN);
827 /* Set up the descriptor. */
828 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
829 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
830 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
831 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
832 r->bge_len = m_new->m_len;
833 r->bge_idx = i;
834
835 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
836 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
837 i * sizeof (struct bge_rx_bd),
838 sizeof (struct bge_rx_bd),
839 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
840
841 return(0);
842 }
843
844 /*
845 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
846 * that's 1MB of memory, which is a lot. For now, we fill only the first
847 * 256 ring entries and hope that our CPU is fast enough to keep up with
848 * the NIC.
849 */
850 int
851 bge_init_rx_ring_std(sc)
852 struct bge_softc *sc;
853 {
854 int i;
855
856 if (sc->bge_flags & BGE_RXRING_VALID)
857 return 0;
858
859 for (i = 0; i < BGE_SSLOTS; i++) {
860 if (bge_newbuf_std(sc, i, NULL, NULL) == ENOBUFS)
861 return(ENOBUFS);
862 }
863
864 sc->bge_std = i - 1;
865 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
866
867 sc->bge_flags |= BGE_RXRING_VALID;
868
869 return(0);
870 }
871
872 void
873 bge_free_rx_ring_std(sc)
874 struct bge_softc *sc;
875 {
876 int i;
877
878 if (!(sc->bge_flags & BGE_RXRING_VALID))
879 return;
880
881 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
882 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
883 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
884 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
885 bus_dmamap_destroy(sc->bge_dmatag,
886 sc->bge_cdata.bge_rx_std_map[i]);
887 }
888 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
889 sizeof(struct bge_rx_bd));
890 }
891
892 sc->bge_flags &= ~BGE_RXRING_VALID;
893 }
894
895 int
896 bge_init_rx_ring_jumbo(sc)
897 struct bge_softc *sc;
898 {
899 int i;
900 volatile struct bge_rcb *rcb;
901
902 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
903 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
904 return(ENOBUFS);
905 }
906
907 sc->bge_jumbo = i - 1;
908
909 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
910 rcb->bge_maxlen_flags = 0;
911 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
912
913 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
914
915 return(0);
916 }
917
918 void
919 bge_free_rx_ring_jumbo(sc)
920 struct bge_softc *sc;
921 {
922 int i;
923
924 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
925 return;
926
927 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
928 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
929 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
930 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
931 }
932 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
933 sizeof(struct bge_rx_bd));
934 }
935
936 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
937 }
938
939 void
940 bge_free_tx_ring(sc)
941 struct bge_softc *sc;
942 {
943 int i, freed;
944 struct txdmamap_pool_entry *dma;
945
946 if (!(sc->bge_flags & BGE_TXRING_VALID))
947 return;
948
949 freed = 0;
950
951 for (i = 0; i < BGE_TX_RING_CNT; i++) {
952 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
953 freed++;
954 m_freem(sc->bge_cdata.bge_tx_chain[i]);
955 sc->bge_cdata.bge_tx_chain[i] = NULL;
956 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
957 link);
958 sc->txdma[i] = NULL;
959 }
960 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
961 sizeof(struct bge_tx_bd));
962 }
963
964 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
965 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
966 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
967 free(dma, M_DEVBUF);
968 }
969
970 sc->bge_flags &= ~BGE_TXRING_VALID;
971 }
972
973 int
974 bge_init_tx_ring(sc)
975 struct bge_softc *sc;
976 {
977 int i;
978 bus_dmamap_t dmamap;
979 struct txdmamap_pool_entry *dma;
980
981 if (sc->bge_flags & BGE_TXRING_VALID)
982 return 0;
983
984 sc->bge_txcnt = 0;
985 sc->bge_tx_saved_considx = 0;
986 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
987 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
988 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
989
990 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
991 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
992 CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
993
994 SLIST_INIT(&sc->txdma_list);
995 for (i = 0; i < BGE_RSLOTS; i++) {
996 if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
997 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
998 &dmamap))
999 return(ENOBUFS);
1000 if (dmamap == NULL)
1001 panic("dmamap NULL in bge_init_tx_ring");
1002 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1003 if (dma == NULL) {
1004 printf("%s: can't alloc txdmamap_pool_entry\n",
1005 sc->bge_dev.dv_xname);
1006 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1007 return (ENOMEM);
1008 }
1009 dma->dmamap = dmamap;
1010 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1011 }
1012
1013 sc->bge_flags |= BGE_TXRING_VALID;
1014
1015 return(0);
1016 }
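
/*
 * Editor's sketch (not built) of how the txdma_list entries created
 * above are meant to circulate: the encap path takes one off the free
 * list for each outgoing packet, and the completion/teardown paths
 * insert it back, as bge_free_tx_ring() does above.
 */
#ifdef notdef
static struct txdmamap_pool_entry *
bge_txdma_get_sketch(struct bge_softc *sc)
{
	struct txdmamap_pool_entry *dma;

	dma = SLIST_FIRST(&sc->txdma_list);
	if (dma != NULL)
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
	return (dma);
}
#endif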
1017
1018 void
1019 bge_setmulti(sc)
1020 struct bge_softc *sc;
1021 {
1022 struct ethercom *ac = &sc->ethercom;
1023 struct ifnet *ifp = &ac->ec_if;
1024 struct ether_multi *enm;
1025 struct ether_multistep step;
1026 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1027 u_int32_t h;
1028 int i;
1029
1030 if (ifp->if_flags & IFF_PROMISC)
1031 goto allmulti;
1032
1033 /* Now program new ones. */
1034 ETHER_FIRST_MULTI(step, ac, enm);
1035 while (enm != NULL) {
1036 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1037 /*
1038 * We must listen to a range of multicast addresses.
1039 * For now, just accept all multicasts, rather than
1040 * trying to set only those filter bits needed to match
1041 * the range. (At this time, the only use of address
1042 * ranges is for IP multicast routing, for which the
1043 * range is big enough to require all bits set.)
1044 */
1045 goto allmulti;
1046 }
1047
1048 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1049
1050 /* Just want the 7 least-significant bits. */
1051 h &= 0x7f;
1052
1053 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1054 ETHER_NEXT_MULTI(step, enm);
1055 }
1056
1057 ifp->if_flags &= ~IFF_ALLMULTI;
1058 goto setit;
1059
1060 allmulti:
1061 ifp->if_flags |= IFF_ALLMULTI;
1062 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1063
1064 setit:
1065 for (i = 0; i < 4; i++)
1066 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1067 }
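
/*
 * Editor's sketch (not built) of the hash mapping used in
 * bge_setmulti() above: the low 7 bits of the little-endian CRC of the
 * address select one of 128 filter bits; bits 6:5 choose the register
 * (BGE_MAR0..BGE_MAR3) and bits 4:0 the bit within it.
 */
#ifdef notdef
static void
bge_mchash_sketch(const u_int8_t *addr, int *reg, u_int32_t *bit)
{
	u_int32_t h;

	h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0x7f;
	*reg = (h & 0x60) >> 5;	/* which of the 4 hash registers */
	*bit = 1 << (h & 0x1F);	/* mask within that register */
}
#endif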
1068
1069 const int bge_swapbits[] = {
1070 0,
1071 BGE_MODECTL_BYTESWAP_DATA,
1072 BGE_MODECTL_WORDSWAP_DATA,
1073 BGE_MODECTL_BYTESWAP_NONFRAME,
1074 BGE_MODECTL_WORDSWAP_NONFRAME,
1075
1076 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
1077 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1078 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1079
1080 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1081 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1082
1083 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1084
1085 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1086 BGE_MODECTL_BYTESWAP_NONFRAME,
1087 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1088 BGE_MODECTL_WORDSWAP_NONFRAME,
1089 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1090 BGE_MODECTL_WORDSWAP_NONFRAME,
1091 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1092 BGE_MODECTL_WORDSWAP_NONFRAME,
1093
1094 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1095 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1096 };
1097
1098 int bge_swapindex = 0;
1099
1100 /*
1101 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1102 * self-test results.
1103 */
1104 int
1105 bge_chipinit(sc)
1106 struct bge_softc *sc;
1107 {
1108 u_int32_t cachesize;
1109 int i;
1110 u_int32_t dma_rw_ctl;
1111 struct pci_attach_args *pa = &(sc->bge_pa);
1112
1113
1114 /* Set endianness before we access any non-PCI registers. */
1115 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
1116 BGE_INIT);
1117
1118 /* Set power state to D0. */
1119 bge_setpowerstate(sc, 0);
1120
1121 /*
1122 * Check the 'ROM failed' bit on the RX CPU to see if
1123 * self-tests passed.
1124 */
1125 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1126 printf("%s: RX CPU self-diagnostics failed!\n",
1127 sc->bge_dev.dv_xname);
1128 return(ENODEV);
1129 }
1130
1131 /* Clear the MAC control register */
1132 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1133
1134 /*
1135 * Clear the MAC statistics block in the NIC's
1136 * internal memory.
1137 */
1138 for (i = BGE_STATS_BLOCK;
1139 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1140 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1141
1142 for (i = BGE_STATUS_BLOCK;
1143 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1144 BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);
1145
1146 /* Set up the PCI DMA control register. */
1147 if (pci_conf_read(pa->pa_pc, pa->pa_tag,BGE_PCI_PCISTATE) &
1148 BGE_PCISTATE_PCI_BUSMODE) {
1149 /* Conventional PCI bus */
1150 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n", sc->bge_dev.dv_xname));
1151 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1152 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1153 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
1154 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1155 dma_rw_ctl |= 0x0F;
1156 }
1157 } else {
1158 DPRINTFN(4, ("(%s: PCI-X DMA setting)\n", sc->bge_dev.dv_xname));
1159 /* PCI-X bus */
1160 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1161 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1162 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1163 (0x0F);
1164 /*
1165 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1166 * for hardware bugs, which means we should also clear
1167 * the low-order MINDMA bits. In addition, the 5704
1168 * uses a different encoding of read/write watermarks.
1169 */
1170 if (sc->bge_asicrev == BGE_ASICREV_BCM5704_A0 ||
1171 sc->bge_asicrev == BGE_ASICREV_BCM5704_A1 ||
1172 sc->bge_asicrev == BGE_ASICREV_BCM5704_A2) {
1173 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1174 /* should be 0x1f0000 */
1175 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1176 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1177 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1178 }
1179 else if ((sc->bge_asicrev >> 28) ==
1180 (BGE_ASICREV_BCM5703_A0 >> 28)) {
1181 dma_rw_ctl &= 0xfffffff0;
1182 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1183 }
1184 }
1185
1186 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1187
1188 /*
1189 * Set up general mode register.
1190 */
1191 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1192 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1193 BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
1194 BGE_MODECTL_RX_NO_PHDR_CSUM);
1195
1196 /* Get cache line size. */
1197 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
1198
1199 /*
1200 * Avoid violating PCI spec on certain chip revs.
1201 */
1202 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
1203 PCIM_CMD_MWIEN) {
1204 switch(cachesize) {
1205 case 1:
1206 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1207 BGE_PCI_WRITE_BNDRY_16BYTES);
1208 break;
1209 case 2:
1210 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1211 BGE_PCI_WRITE_BNDRY_32BYTES);
1212 break;
1213 case 4:
1214 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1215 BGE_PCI_WRITE_BNDRY_64BYTES);
1216 break;
1217 case 8:
1218 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1219 BGE_PCI_WRITE_BNDRY_128BYTES);
1220 break;
1221 case 16:
1222 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1223 BGE_PCI_WRITE_BNDRY_256BYTES);
1224 break;
1225 case 32:
1226 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1227 BGE_PCI_WRITE_BNDRY_512BYTES);
1228 break;
1229 case 64:
1230 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
1231 BGE_PCI_WRITE_BNDRY_1024BYTES);
1232 break;
1233 default:
1234 /* Disable PCI memory write and invalidate. */
1235 #if 0
1236 if (bootverbose)
1237 printf("%s: cache line size %d not "
1238 "supported; disabling PCI MWI\n",
1239 sc->bge_dev.dv_xname, cachesize);
1240 #endif
1241 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
1242 PCIM_CMD_MWIEN);
1243 break;
1244 }
1245 }
1246
1247 /*
1248 * Disable memory write invalidate. Apparently it is not supported
1249 * properly by these devices.
1250 */
1251 PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
1252
1253
1254 #ifdef __brokenalpha__
1255 /*
1256 * Must ensure that we do not cross an 8K (byte) boundary
1257 * for DMA reads. Our highest limit is 1K bytes. This is a
1258 * restriction on some ALPHA platforms with early revision
1259 * 21174 PCI chipsets, such as the AlphaPC 164lx
1260 */
1261 PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024);
1262 #endif
1263
1264 /* Set the timer prescaler (always 66MHz) */
1265 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1266
1267 return(0);
1268 }
1269
1270 int
1271 bge_blockinit(sc)
1272 struct bge_softc *sc;
1273 {
1274 volatile struct bge_rcb *rcb;
1275 bus_size_t rcb_addr;
1276 int i;
1277 struct ifnet *ifp = &sc->ethercom.ec_if;
1278 bge_hostaddr taddr;
1279
1280 /*
1281 * Initialize the memory window pointer register so that
1282 * we can access the first 32K of internal NIC RAM. This will
1283 * allow us to set up the TX send ring RCBs and the RX return
1284 * ring RCBs, plus other things which live in NIC memory.
1285 */
1286
1287 pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
1288 BGE_PCI_MEMWIN_BASEADDR, 0);
1289
1290 /* Configure mbuf memory pool */
1291 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1292 if (sc->bge_extram) {
1293 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1294 BGE_EXT_SSRAM);
1295 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1296 } else {
1297 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1298 BGE_BUFFPOOL_1);
1299 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1300 }
1301
1302 /* Configure DMA resource pool */
1303 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1304 BGE_DMA_DESCRIPTORS);
1305 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1306 }
1307
1308 /* Configure mbuf pool watermarks */
1309 #ifdef ORIG_WPAUL_VALUES
1310 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1311 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1312 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1313 #else
1314 /* new broadcom docs strongly recommend these: */
1315 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1316 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1317 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1318 } else {
1319 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1320 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1321 }
1322 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1323 #endif
1324
1325 /* Configure DMA resource watermarks */
1326 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1327 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1328
1329 /* Enable buffer manager */
1330 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1331 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1332 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1333
1334 /* Poll for buffer manager start indication */
1335 for (i = 0; i < BGE_TIMEOUT; i++) {
1336 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1337 break;
1338 DELAY(10);
1339 }
1340
1341 if (i == BGE_TIMEOUT) {
1342 printf("%s: buffer manager failed to start\n",
1343 sc->bge_dev.dv_xname);
1344 return(ENXIO);
1345 }
1346 }
1347
1348 /* Enable flow-through queues */
1349 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1350 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1351
1352 /* Wait until queue initialization is complete */
1353 for (i = 0; i < BGE_TIMEOUT; i++) {
1354 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1355 break;
1356 DELAY(10);
1357 }
1358
1359 if (i == BGE_TIMEOUT) {
1360 printf("%s: flow-through queue init failed\n",
1361 sc->bge_dev.dv_xname);
1362 return(ENXIO);
1363 }
1364
1365 /* Initialize the standard RX ring control block */
1366 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1367 bge_set_hostaddr(&rcb->bge_hostaddr,
1368 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1369 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1370 rcb->bge_maxlen_flags =
1371 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1372 } else {
1373 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1374 }
1375 if (sc->bge_extram)
1376 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1377 else
1378 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1379 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1380 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1381 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1382 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1383
1384 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1385 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1386 } else {
1387 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1388 }
1389
1390 /*
1391 * Initialize the jumbo RX ring control block
1392 * We set the 'ring disabled' bit in the flags
1393 * field until we're actually ready to start
1394 * using this ring (i.e. once we set the MTU
1395 * high enough to require it).
1396 */
1397 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1398 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1399 bge_set_hostaddr(&rcb->bge_hostaddr,
1400 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1401 rcb->bge_maxlen_flags =
1402 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1403 BGE_RCB_FLAG_RING_DISABLED);
1404 if (sc->bge_extram)
1405 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1406 else
1407 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1408
1409 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1410 rcb->bge_hostaddr.bge_addr_hi);
1411 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1412 rcb->bge_hostaddr.bge_addr_lo);
1413 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1414 rcb->bge_maxlen_flags);
1415 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1416
1417 /* Set up dummy disabled mini ring RCB */
1418 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1419 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1420 BGE_RCB_FLAG_RING_DISABLED);
1421 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1422 rcb->bge_maxlen_flags);
1423
1424 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1425 offsetof(struct bge_ring_data, bge_info),
1426 sizeof (struct bge_gib),
1427 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1428 }
1429
1430 /*
1431 * Set the BD ring replenish thresholds. The recommended
1432 * values are 1/8th the number of descriptors allocated to
1433 * each ring (e.g. BGE_STD_RX_RING_CNT/8 == 512/8 == 64).
1434 */
1435 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
1436 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1437
1438 /*
1439 * Disable all unused send rings by setting the 'ring disabled'
1440 * bit in the flags field of all the TX send ring control blocks.
1441 * These are located in NIC memory.
1442 */
1443 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1444 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1445 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1446 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1447 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1448 rcb_addr += sizeof(struct bge_rcb);
1449 }
1450
1451 /* Configure TX RCB 0 (we use only the first ring) */
1452 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1453 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1454 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1455 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1456 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1457 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1458 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1459 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1460 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1461 }
1462
1463 /* Disable all unused RX return rings */
1464 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1465 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1466 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1467 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1468 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1469 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1470 BGE_RCB_FLAG_RING_DISABLED));
1471 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1472 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
1473 (i * (sizeof(u_int64_t))), 0);
1474 rcb_addr += sizeof(struct bge_rcb);
1475 }
1476
1477 /* Initialize RX ring indexes */
1478 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1479 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1480 CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1481
1482 /*
1483 * Set up RX return ring 0
1484 * Note that the NIC address for RX return rings is 0x00000000.
1485 * The return rings live entirely within the host, so the
1486 * nicaddr field in the RCB isn't used.
1487 */
1488 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1489 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1490 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1491 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1492 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1493 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1494 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1495
1496 /* Set random backoff seed for TX */
1497 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1498 LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
1499 LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
1500 LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
1501 BGE_TX_BACKOFF_SEED_MASK);
1502
1503 /* Set inter-packet gap */
1504 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1505
1506 /*
1507 * Specify which ring to use for packets that don't match
1508 * any RX rules.
1509 */
1510 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1511
1512 /*
1513 * Configure number of RX lists. One interrupt distribution
1514 * list, sixteen active lists, one bad frames class.
1515 */
1516 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1517
1518 /* Initialize RX list placement stats mask. */
1519 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1520 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1521
1522 /* Disable host coalescing until we get it set up */
1523 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1524
1525 /* Poll to make sure it's shut down. */
1526 for (i = 0; i < BGE_TIMEOUT; i++) {
1527 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1528 break;
1529 DELAY(10);
1530 }
1531
1532 if (i == BGE_TIMEOUT) {
1533 printf("%s: host coalescing engine failed to idle\n",
1534 sc->bge_dev.dv_xname);
1535 return(ENXIO);
1536 }
1537
1538 /* Set up host coalescing defaults */
1539 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1540 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1541 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1542 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1543 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1544 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1545 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1546 }
1547 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1548 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1549
1550 /* Set up address of statistics block */
1551 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1552 bge_set_hostaddr(&taddr,
1553 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1554 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1555 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1556 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
1557 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1558 }
1559
1560 /* Set up address of status block */
1561 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1562 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1563 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1564 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1565 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1566 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1567
1568 /* Turn on host coalescing state machine */
1569 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1570
1571 /* Turn on RX BD completion state machine and enable attentions */
1572 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1573 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1574
1575 /* Turn on RX list placement state machine */
1576 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1577
1578 /* Turn on RX list selector state machine. */
1579 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1580 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1581 }
1582
1583 /* Turn on DMA, clear stats */
1584 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1585 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1586 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1587 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1588 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1589
1590 /* Set misc. local control, enable interrupts on attentions */
1591 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
1592
1593 #ifdef notdef
1594 /* Assert GPIO pins for PHY reset */
1595 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1596 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1597 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1598 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1599 #endif
1600
1601 #if defined(not_quite_yet)
1602 /* The Linux driver enables GPIO pin #1 on 5700s */
1603 if (sc->bge_asicrev == BGE_ASICREV_BCM5700) {
1604 sc->bge_local_ctrl_reg |=
1605 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
1606 }
1607 #endif
1608 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1609
1610 /* Turn on DMA completion state machine */
1611 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1612 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1613 }
1614
1615 /* Turn on write DMA state machine */
1616 CSR_WRITE_4(sc, BGE_WDMA_MODE,
1617 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);
1618
1619 /* Turn on read DMA state machine */
1620 CSR_WRITE_4(sc, BGE_RDMA_MODE,
1621 BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);
1622
1623 /* Turn on RX data completion state machine */
1624 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1625
1626 /* Turn on RX BD initiator state machine */
1627 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1628
1629 /* Turn on RX data and RX BD initiator state machine */
1630 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1631
1632 /* Turn on Mbuf cluster free state machine */
1633 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1634 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1635 }
1636
1637 /* Turn on send BD completion state machine */
1638 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1639
1640 /* Turn on send data completion state machine */
1641 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1642
1643 /* Turn on send data initiator state machine */
1644 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1645
1646 /* Turn on send BD initiator state machine */
1647 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1648
1649 /* Turn on send BD selector state machine */
1650 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1651
1652 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1653 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1654 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1655
1656 /* init LED register */
1657 CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);
1658
1659 /* ack/clear link change events */
1660 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1661 BGE_MACSTAT_CFG_CHANGED);
1662 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1663
1664 /* Enable PHY auto polling (for MII/GMII only) */
1665 if (sc->bge_tbi) {
1666 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1667 } else {
1668 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1669 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1670 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1671 BGE_EVTENB_MI_INTERRUPT);
1672 }
1673
1674 /* Enable link state change attentions. */
1675 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1676
1677 return(0);
1678 }
1679
1680 static const struct bge_revision {
1681 uint32_t br_asicrev;
1682 uint32_t br_quirks;
1683 const char *br_name;
1684 } bge_revisions[] = {
1685 { BGE_ASICREV_BCM5700_A0,
1686 BGE_QUIRK_LINK_STATE_BROKEN,
1687 "BCM5700 A0" },
1688
1689 { BGE_ASICREV_BCM5700_A1,
1690 BGE_QUIRK_LINK_STATE_BROKEN,
1691 "BCM5700 A1" },
1692
1693 { BGE_ASICREV_BCM5700_B0,
1694 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
1695 "BCM5700 B0" },
1696
1697 { BGE_ASICREV_BCM5700_B1,
1698 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1699 "BCM5700 B1" },
1700
1701 { BGE_ASICREV_BCM5700_B2,
1702 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1703 "BCM5700 B2" },
1704
1705 /* This is treated like a BCM5700 Bx */
1706 { BGE_ASICREV_BCM5700_ALTIMA,
1707 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
1708 "BCM5700 Altima" },
1709
1710 { BGE_ASICREV_BCM5700_C0,
1711 0,
1712 "BCM5700 C0" },
1713
1714 { BGE_ASICREV_BCM5701_A0,
1715 0, /*XXX really, just not known */
1716 "BCM5701 A0" },
1717
1718 { BGE_ASICREV_BCM5701_B0,
1719 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1720 "BCM5701 B0" },
1721
1722 { BGE_ASICREV_BCM5701_B2,
1723 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1724 "BCM5701 B2" },
1725
1726 { BGE_ASICREV_BCM5701_B5,
1727 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
1728 "BCM5701 B5" },
1729
1730 { BGE_ASICREV_BCM5703_A0,
1731 0,
1732 "BCM5703 A0" },
1733
1734 { BGE_ASICREV_BCM5703_A1,
1735 0,
1736 "BCM5703 A1" },
1737
1738 { BGE_ASICREV_BCM5703_A2,
1739 BGE_QUIRK_ONLY_PHY_1,
1740 "BCM5703 A2" },
1741
1742 { BGE_ASICREV_BCM5704_A0,
1743 BGE_QUIRK_ONLY_PHY_1,
1744 "BCM5704 A0" },
1745
1746 { BGE_ASICREV_BCM5704_A1,
1747 BGE_QUIRK_ONLY_PHY_1,
1748 "BCM5704 A1" },
1749
1750 { BGE_ASICREV_BCM5704_A2,
1751 BGE_QUIRK_ONLY_PHY_1,
1752 "BCM5704 A2" },
1753
1754 { BGE_ASICREV_BCM5705_A1,
1755 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
1756 "BCM5705 A1" },
1757
1758 { 0, 0, NULL }
1759 };
1760
1761 static const struct bge_revision *
1762 bge_lookup_rev(uint32_t asicrev)
1763 {
1764 const struct bge_revision *br;
1765
1766 for (br = bge_revisions; br->br_name != NULL; br++) {
1767 if (br->br_asicrev == asicrev)
1768 return (br);
1769 }
1770
1771 return (NULL);
1772 }
1773
1774 static const struct bge_product {
1775 pci_vendor_id_t bp_vendor;
1776 pci_product_id_t bp_product;
1777 const char *bp_name;
1778 } bge_products[] = {
1779 /*
1780 * The BCM5700 documentation seems to indicate that the hardware
1781 * still has the Alteon vendor ID burned into it, though it
1782 * should always be overridden by the value in the EEPROM. We'll
1783 * check for it anyway.
1784 */
1785 { PCI_VENDOR_ALTEON,
1786 PCI_PRODUCT_ALTEON_BCM5700,
1787 "Broadcom BCM5700 Gigabit Ethernet" },
1788 { PCI_VENDOR_ALTEON,
1789 PCI_PRODUCT_ALTEON_BCM5701,
1790 "Broadcom BCM5701 Gigabit Ethernet" },
1791
1792 { PCI_VENDOR_ALTIMA,
1793 PCI_PRODUCT_ALTIMA_AC1000,
1794 "Altima AC1000 Gigabit Ethernet" },
1795 { PCI_VENDOR_ALTIMA,
1796 PCI_PRODUCT_ALTIMA_AC1001,
1797 "Altima AC1001 Gigabit Ethernet" },
1798 { PCI_VENDOR_ALTIMA,
1799 PCI_PRODUCT_ALTIMA_AC9100,
1800 "Altima AC9100 Gigabit Ethernet" },
1801
1802 { PCI_VENDOR_BROADCOM,
1803 PCI_PRODUCT_BROADCOM_BCM5700,
1804 "Broadcom BCM5700 Gigabit Ethernet" },
1805 { PCI_VENDOR_BROADCOM,
1806 PCI_PRODUCT_BROADCOM_BCM5701,
1807 "Broadcom BCM5701 Gigabit Ethernet" },
1808 { PCI_VENDOR_BROADCOM,
1809 PCI_PRODUCT_BROADCOM_BCM5702,
1810 "Broadcom BCM5702 Gigabit Ethernet" },
1811 { PCI_VENDOR_BROADCOM,
1812 PCI_PRODUCT_BROADCOM_BCM5702X,
1813 "Broadcom BCM5702X Gigabit Ethernet" },
1814 { PCI_VENDOR_BROADCOM,
1815 PCI_PRODUCT_BROADCOM_BCM5703,
1816 "Broadcom BCM5703 Gigabit Ethernet" },
1817 { PCI_VENDOR_BROADCOM,
1818 PCI_PRODUCT_BROADCOM_BCM5703X,
1819 "Broadcom BCM5703X Gigabit Ethernet" },
1820 { PCI_VENDOR_BROADCOM,
1821 PCI_PRODUCT_BROADCOM_BCM5704C,
1822 "Broadcom BCM5704C Dual Gigabit Ethernet" },
1823 { PCI_VENDOR_BROADCOM,
1824 PCI_PRODUCT_BROADCOM_BCM5704S,
1825 "Broadcom BCM5704S Dual Gigabit Ethernet" },
1826 { PCI_VENDOR_BROADCOM,
1827 PCI_PRODUCT_BROADCOM_BCM5705M,
1828 "Broadcom BCM5705M Gigabit Ethernet" },
1829
1830 { PCI_VENDOR_SCHNEIDERKOCH,
1831 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
1832 "SysKonnect SK-9Dx1 Gigabit Ethernet" },
1833
1834 { PCI_VENDOR_3COM,
1835 PCI_PRODUCT_3COM_3C996,
1836 "3Com 3c996 Gigabit Ethernet" },
1837
1838 { 0,
1839 0,
1840 NULL },
1841 };
1842
1843 static const struct bge_product *
1844 bge_lookup(const struct pci_attach_args *pa)
1845 {
1846 const struct bge_product *bp;
1847
1848 for (bp = bge_products; bp->bp_name != NULL; bp++) {
1849 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
1850 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
1851 return (bp);
1852 }
1853
1854 return (NULL);
1855 }
1856
1857 int
1858 bge_setpowerstate(sc, powerlevel)
1859 struct bge_softc *sc;
1860 int powerlevel;
1861 {
1862 #ifdef NOTYET
1863 u_int32_t pm_ctl = 0;
1864
1865 /* XXX FIXME: make sure indirect accesses enabled? */
1866 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
1867 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
1868 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
1869
1870 /* clear the PME_assert bit and power state bits, enable PME */
1871 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
1872 pm_ctl &= ~PCIM_PSTAT_DMASK;
1873 pm_ctl |= (1 << 8);
1874
1875 if (powerlevel == 0) {
1876 pm_ctl |= PCIM_PSTAT_D0;
1877 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
1878 pm_ctl, 2);
1879 DELAY(10000);
1880 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1881 DELAY(10000);
1882
1883 #ifdef NOTYET
1884 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
1885 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
1886 #endif
1887 DELAY(40); DELAY(40); DELAY(40);
1888 DELAY(10000); /* above not quite adequate on 5700 */
1889 return 0;
1890 }
1891
1892
1893 /*
1894 * Entering ACPI power states D1-D3 is achieved by wiggling
1895 * GMII gpio pins. Example code assumes all hardware vendors
	 * followed Broadcom's sample PCB layout. Until we verify that
1897 * for all supported OEM cards, states D1-D3 are unsupported.
1898 */
1899 printf("%s: power state %d unimplemented; check GPIO pins\n",
1900 sc->bge_dev.dv_xname, powerlevel);
1901 #endif
1902 return EOPNOTSUPP;
1903 }
1904
1905
/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return a match if we find one. (The Broadcom
 * controller does have VPD support, so the device name string could
 * in principle be read from the hardware itself, but this driver
 * simply announces the compiled-in name from the table above.)
 */
1914 int
1915 bge_probe(parent, match, aux)
1916 struct device *parent;
1917 struct cfdata *match;
1918 void *aux;
1919 {
1920 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1921
1922 if (bge_lookup(pa) != NULL)
1923 return (1);
1924
1925 return (0);
1926 }
1927
1928 void
1929 bge_attach(parent, self, aux)
1930 struct device *parent, *self;
1931 void *aux;
1932 {
1933 struct bge_softc *sc = (struct bge_softc *)self;
1934 struct pci_attach_args *pa = aux;
1935 const struct bge_product *bp;
1936 const struct bge_revision *br;
1937 pci_chipset_tag_t pc = pa->pa_pc;
1938 pci_intr_handle_t ih;
1939 const char *intrstr = NULL;
1940 bus_dma_segment_t seg;
1941 int rseg;
1942 u_int32_t hwcfg = 0;
1943 u_int32_t mac_addr = 0;
1944 u_int32_t command;
1945 struct ifnet *ifp;
1946 caddr_t kva;
1947 u_char eaddr[ETHER_ADDR_LEN];
1948 pcireg_t memtype;
1949 bus_addr_t memaddr;
1950 bus_size_t memsize;
1951 u_int32_t pm_ctl;
1952
1953 bp = bge_lookup(pa);
1954 KASSERT(bp != NULL);
1955
1956 sc->bge_pa = *pa;
1957
1958 aprint_naive(": Ethernet controller\n");
1959 aprint_normal(": %s\n", bp->bp_name);
1960
1961 /*
1962 * Map control/status registers.
1963 */
1964 DPRINTFN(5, ("Map control/status regs\n"));
1965 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1966 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1967 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
1968 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1969
1970 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
1971 aprint_error("%s: failed to enable memory mapping!\n",
1972 sc->bge_dev.dv_xname);
1973 return;
1974 }
1975
1976 DPRINTFN(5, ("pci_mem_find\n"));
1977 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
1978 switch (memtype) {
1979 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1980 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1981 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
1982 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
1983 &memaddr, &memsize) == 0)
1984 break;
1985 default:
1986 aprint_error("%s: can't find mem space\n",
1987 sc->bge_dev.dv_xname);
1988 return;
1989 }
1990
1991 DPRINTFN(5, ("pci_intr_map\n"));
1992 if (pci_intr_map(pa, &ih)) {
1993 aprint_error("%s: couldn't map interrupt\n",
1994 sc->bge_dev.dv_xname);
1995 return;
1996 }
1997
1998 DPRINTFN(5, ("pci_intr_string\n"));
1999 intrstr = pci_intr_string(pc, ih);
2000
2001 DPRINTFN(5, ("pci_intr_establish\n"));
2002 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2003
2004 if (sc->bge_intrhand == NULL) {
2005 aprint_error("%s: couldn't establish interrupt",
2006 sc->bge_dev.dv_xname);
2007 if (intrstr != NULL)
2008 aprint_normal(" at %s", intrstr);
2009 aprint_normal("\n");
2010 return;
2011 }
2012 aprint_normal("%s: interrupting at %s\n",
2013 sc->bge_dev.dv_xname, intrstr);
2014
2015 /*
2016 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2017 * can clobber the chip's PCI config-space power control registers,
2018 * leaving the card in D3 powersave state.
2019 * We do not have memory-mapped registers in this state,
2020 * so force device into D0 state before starting initialization.
2021 */
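	/*
	 * (PMCSR layout, assumed from the PCI power-management spec:
	 * bits 1:0 select the power state and bit 8 is PME_En, so the
	 * write below selects D0 and leaves PME enabled.)
	 */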
2022 pm_ctl = pci_conf_read(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD);
2023 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
	pm_ctl |= (1 << 8) | PCI_PWR_D0;	/* D0 state */
2025 pci_conf_write(pc, pa->pa_tag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
	DELAY(1000);	/* 27 usec is allegedly sufficient */
2027
2028 /* Try to reset the chip. */
2029 DPRINTFN(5, ("bge_reset\n"));
2030 bge_reset(sc);
2031
2032 if (bge_chipinit(sc)) {
2033 aprint_error("%s: chip initialization failed\n",
2034 sc->bge_dev.dv_xname);
2035 bge_release_resources(sc);
2036 return;
2037 }
2038
2039 /*
2040 * Get station address from the EEPROM.
2041 */
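	/*
	 * (0x484b is ASCII "HK", apparently a bootcode signature
	 * marking a valid station address in NIC memory; if it is
	 * absent we fall back to reading the EEPROM directly.)
	 */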
2042 mac_addr = bge_readmem_ind(sc, 0x0c14);
2043 if ((mac_addr >> 16) == 0x484b) {
2044 eaddr[0] = (u_char)(mac_addr >> 8);
2045 eaddr[1] = (u_char)(mac_addr >> 0);
2046 mac_addr = bge_readmem_ind(sc, 0x0c18);
2047 eaddr[2] = (u_char)(mac_addr >> 24);
2048 eaddr[3] = (u_char)(mac_addr >> 16);
2049 eaddr[4] = (u_char)(mac_addr >> 8);
2050 eaddr[5] = (u_char)(mac_addr >> 0);
2051 } else if (bge_read_eeprom(sc, (caddr_t)eaddr,
2052 BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
2053 aprint_error("%s: failed to read station address\n",
2054 sc->bge_dev.dv_xname);
2055 bge_release_resources(sc);
2056 return;
2057 }
2058
2059 /*
2060 * Save ASIC rev. Look up any quirks associated with this
2061 * ASIC.
2062 */
2063 sc->bge_asicrev =
2064 pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
2065 BGE_PCIMISCCTL_ASICREV;
2066 br = bge_lookup_rev(sc->bge_asicrev);
2067
2068 aprint_normal("%s: ", sc->bge_dev.dv_xname);
2069 if (br == NULL) {
2070 aprint_normal("unknown ASIC 0x%08x", sc->bge_asicrev);
2071 sc->bge_quirks = 0;
2072 } else {
2073 aprint_normal("ASIC %s", br->br_name);
2074 sc->bge_quirks = br->br_quirks;
2075 }
2076 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2077
2078 /* Allocate the general information block and ring buffers. */
2079 if (pci_dma64_available(pa))
2080 sc->bge_dmatag = pa->pa_dmat64;
2081 else
2082 sc->bge_dmatag = pa->pa_dmat;
2083 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2084 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2085 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2086 aprint_error("%s: can't alloc rx buffers\n",
2087 sc->bge_dev.dv_xname);
2088 return;
2089 }
2090 DPRINTFN(5, ("bus_dmamem_map\n"));
2091 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2092 sizeof(struct bge_ring_data), &kva,
2093 BUS_DMA_NOWAIT)) {
2094 aprint_error("%s: can't map DMA buffers (%d bytes)\n",
2095 sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
2096 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2097 return;
2098 }
	DPRINTFN(5, ("bus_dmamap_create\n"));
2100 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2101 sizeof(struct bge_ring_data), 0,
2102 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2103 aprint_error("%s: can't create DMA map\n",
2104 sc->bge_dev.dv_xname);
2105 bus_dmamem_unmap(sc->bge_dmatag, kva,
2106 sizeof(struct bge_ring_data));
2107 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2108 return;
2109 }
	DPRINTFN(5, ("bus_dmamap_load\n"));
2111 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2112 sizeof(struct bge_ring_data), NULL,
2113 BUS_DMA_NOWAIT)) {
2114 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2115 bus_dmamem_unmap(sc->bge_dmatag, kva,
2116 sizeof(struct bge_ring_data));
2117 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2118 return;
2119 }
2120
	DPRINTFN(5, ("memset\n"));
2122 sc->bge_rdata = (struct bge_ring_data *)kva;
2123
2124 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2125
2126 /* Try to allocate memory for jumbo buffers. */
2127 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2128 if (bge_alloc_jumbo_mem(sc)) {
2129 aprint_error("%s: jumbo buffer allocation failed\n",
2130 sc->bge_dev.dv_xname);
2131 } else
2132 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2133 }
2134
2135 /* Set default tuneable values. */
2136 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2137 sc->bge_rx_coal_ticks = 150;
2138 sc->bge_rx_max_coal_bds = 64;
2139 #ifdef ORIG_WPAUL_VALUES
2140 sc->bge_tx_coal_ticks = 150;
2141 sc->bge_tx_max_coal_bds = 128;
2142 #else
2143 sc->bge_tx_coal_ticks = 300;
2144 sc->bge_tx_max_coal_bds = 400;
2145 #endif
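	/*
	 * (The coalescing tick values are assumed to be in
	 * microseconds; the *_max_coal_bds values bound how many
	 * buffer descriptors may accumulate before an interrupt.)
	 */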
2146
2147 /* Set up ifnet structure */
2148 ifp = &sc->ethercom.ec_if;
2149 ifp->if_softc = sc;
2150 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2151 ifp->if_ioctl = bge_ioctl;
2152 ifp->if_start = bge_start;
2153 ifp->if_init = bge_init;
2154 ifp->if_watchdog = bge_watchdog;
2155 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2156 IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("strcpy\n"));
2158 strcpy(ifp->if_xname, sc->bge_dev.dv_xname);
2159
2160 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2161 sc->ethercom.ec_if.if_capabilities |=
2162 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
2163 sc->ethercom.ec_capabilities |=
2164 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2165
2166 /*
2167 * Do MII setup.
2168 */
2169 DPRINTFN(5, ("mii setup\n"));
2170 sc->bge_mii.mii_ifp = ifp;
2171 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2172 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2173 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2174
2175 /*
2176 * Figure out what sort of media we have by checking the
2177 * hardware config word in the first 32k of NIC internal memory,
2178 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2179 * cards, this value appears to be unset. If that's the
2180 * case, we have to rely on identifying the NIC by its PCI
2181 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2182 */
2183 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2184 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2185 } else {
2186 bge_read_eeprom(sc, (caddr_t)&hwcfg,
2187 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2188 hwcfg = be32toh(hwcfg);
2189 }
2190 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2191 sc->bge_tbi = 1;
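	/*
	 * (TBI is the MAC's ten-bit interface, used for 1000baseX
	 * fiber media in place of a copper PHY.)
	 */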
2192
2193 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2194 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
2195 SK_SUBSYSID_9D41)
2196 sc->bge_tbi = 1;
2197
2198 if (sc->bge_tbi) {
2199 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2200 bge_ifmedia_sts);
2201 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2202 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2203 0, NULL);
2204 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2205 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2206 } else {
2207 /*
2208 * Do transceiver setup.
2209 */
2210 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2211 bge_ifmedia_sts);
2212 mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
2213 MII_PHY_ANY, MII_OFFSET_ANY, 0);
2214
2215 if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
			aprint_error("%s: no PHY found!\n", sc->bge_dev.dv_xname);
2217 ifmedia_add(&sc->bge_mii.mii_media,
2218 IFM_ETHER|IFM_MANUAL, 0, NULL);
2219 ifmedia_set(&sc->bge_mii.mii_media,
2220 IFM_ETHER|IFM_MANUAL);
2221 } else
2222 ifmedia_set(&sc->bge_mii.mii_media,
2223 IFM_ETHER|IFM_AUTO);
2224 }
2225
2226 /*
2227 * When using the BCM5701 in PCI-X mode, data corruption has
2228 * been observed in the first few bytes of some received packets.
2229 * Aligning the packet buffer in memory eliminates the corruption.
2230 * Unfortunately, this misaligns the packet payloads. On platforms
2231 * which do not support unaligned accesses, we will realign the
2232 * payloads by copying the received packets.
2233 */
2234 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2235 /* If in PCI-X mode, work around the alignment bug. */
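		/*
		 * (BGE_PCISTATE_PCI_BUSMODE is assumed to be set for
		 * conventional PCI; busmode clear with BUSSPEED set
		 * indicates a PCI-X bus.)
		 */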
2236 if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) &
2237 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2238 BGE_PCISTATE_PCI_BUSSPEED)
2239 sc->bge_rx_alignment_bug = 1;
2240 }
2241
2242 /*
2243 * Call MI attach routine.
2244 */
2245 DPRINTFN(5, ("if_attach\n"));
2246 if_attach(ifp);
2247 DPRINTFN(5, ("ether_ifattach\n"));
2248 ether_ifattach(ifp, eaddr);
2249 DPRINTFN(5, ("callout_init\n"));
2250 callout_init(&sc->bge_timeout);
2251 }
2252
2253 void
2254 bge_release_resources(sc)
2255 struct bge_softc *sc;
2256 {
2257 if (sc->bge_vpd_prodname != NULL)
2258 free(sc->bge_vpd_prodname, M_DEVBUF);
2259
2260 if (sc->bge_vpd_readonly != NULL)
2261 free(sc->bge_vpd_readonly, M_DEVBUF);
2262 }
2263
2264 void
2265 bge_reset(sc)
2266 struct bge_softc *sc;
2267 {
2268 struct pci_attach_args *pa = &sc->bge_pa;
2269 u_int32_t cachesize, command, pcistate;
2270 int i, val = 0;
2271
2272 /* Save some important PCI state. */
2273 cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
2274 command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
2275 pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);
2276
2277 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2278 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2279 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2280
2281 /* Issue global reset */
2282 bge_writereg_ind(sc, BGE_MISC_CFG,
2283 BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));
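	/*
	 * ((65 << 1) is assumed to program the 32-bit-time delay timer
	 * for a 66MHz clock; later headers name this value
	 * BGE_32BITTIME_66MHZ.)
	 */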
2284
2285 DELAY(1000);
2286
2287 /* Reset some of the PCI state that got zapped by reset */
2288 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
2289 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2290 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2291 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
2292 pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
2293 bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));
2294
2295 /* Enable memory arbiter. */
2296 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2297 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2298 }
2299
2300 /*
2301 * Prevent PXE restart: write a magic number to the
2302 * general communications memory at 0xB50.
2303 */
2304 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
2305
2306 /*
2307 * Poll the value location we just wrote until
2308 * we see the 1's complement of the magic number.
2309 * This indicates that the firmware initialization
2310 * is complete.
2311 */
2312 for (i = 0; i < 750; i++) {
2313 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
2314 if (val == ~BGE_MAGIC_NUMBER)
2315 break;
2316 DELAY(1000);
2317 }
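	/* (750 iterations at 1ms apiece gives the bootcode ~750ms.) */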
2318
2319 if (i == 750) {
2320 printf("%s: firmware handshake timed out, val = %x\n",
2321 sc->bge_dev.dv_xname, val);
2322 return;
2323 }
2324
2325 /*
2326 * XXX Wait for the value of the PCISTATE register to
2327 * return to its original pre-reset state. This is a
2328 * fairly good indicator of reset completion. If we don't
2329 * wait for the reset to fully complete, trying to read
2330 * from the device's non-PCI registers may yield garbage
2331 * results.
2332 */
2333 for (i = 0; i < BGE_TIMEOUT; i++) {
2334 if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
2335 pcistate)
2336 break;
2337 DELAY(10);
2338 }
2339
2340 /* Enable memory arbiter. */
2341 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2342 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2343 }
2344
2345 /* Fix up byte swapping */
2346 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
2347
2348 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
2349
2350 DELAY(10000);
2351 }
2352
2353 /*
2354 * Frame reception handling. This is called if there's a frame
2355 * on the receive return list.
2356 *
2357 * Note: we have to be able to handle two possibilities here:
 * 1) the frame is from the jumbo receive ring
2359 * 2) the frame is from the standard receive ring
2360 */
2361
2362 void
2363 bge_rxeof(sc)
2364 struct bge_softc *sc;
2365 {
2366 struct ifnet *ifp;
2367 int stdcnt = 0, jumbocnt = 0;
2368 int have_tag = 0;
2369 u_int16_t vlan_tag = 0;
2370 bus_dmamap_t dmamap;
2371 bus_addr_t offset, toff;
2372 bus_size_t tlen;
2373 int tosync;
2374
2375 ifp = &sc->ethercom.ec_if;
2376
2377 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2378 offsetof(struct bge_ring_data, bge_status_block),
2379 sizeof (struct bge_status_block),
2380 BUS_DMASYNC_POSTREAD);
2381
2382 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
2383 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
2384 sc->bge_rx_saved_considx;
2385
2386 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
2387
2388 if (tosync < 0) {
2389 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
2390 sizeof (struct bge_rx_bd);
2391 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2392 toff, tlen, BUS_DMASYNC_POSTREAD);
2393 tosync = -tosync;
2394 }
2395
2396 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2397 offset, tosync * sizeof (struct bge_rx_bd),
2398 BUS_DMASYNC_POSTREAD);
2399
2400 while(sc->bge_rx_saved_considx !=
2401 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
2402 struct bge_rx_bd *cur_rx;
2403 u_int32_t rxidx;
2404 struct mbuf *m = NULL;
2405
2406 cur_rx = &sc->bge_rdata->
2407 bge_rx_return_ring[sc->bge_rx_saved_considx];
2408
2409 rxidx = cur_rx->bge_idx;
2410 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
2411
2412 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2413 have_tag = 1;
2414 vlan_tag = cur_rx->bge_vlan_tag;
2415 }
2416
2417 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2418 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2419 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2420 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2421 jumbocnt++;
2422 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2423 ifp->if_ierrors++;
2424 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2425 continue;
2426 }
2427 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2428 NULL)== ENOBUFS) {
2429 ifp->if_ierrors++;
2430 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2431 continue;
2432 }
2433 } else {
2434 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2435 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2436 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2437 stdcnt++;
2438 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
			sc->bge_cdata.bge_rx_std_map[rxidx] = NULL;
2440 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2441 ifp->if_ierrors++;
2442 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2443 continue;
2444 }
2445 if (bge_newbuf_std(sc, sc->bge_std,
2446 NULL, dmamap) == ENOBUFS) {
2447 ifp->if_ierrors++;
2448 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2449 continue;
2450 }
2451 }
2452
2453 ifp->if_ipackets++;
2454 #ifndef __NO_STRICT_ALIGNMENT
2455 /*
2456 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
2457 * the Rx buffer has the layer-2 header unaligned.
2458 * If our CPU requires alignment, re-align by copying.
2459 */
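		/*
		 * (ETHER_ALIGN is 2, which puts the IP header back on a
		 * 32-bit boundary after the copy.)
		 */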
2460 if (sc->bge_rx_alignment_bug) {
2461 memmove(mtod(m, caddr_t) + ETHER_ALIGN, m->m_data,
2462 cur_rx->bge_len);
2463 m->m_data += ETHER_ALIGN;
2464 }
2465 #endif
2466
2467 m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
2468 m->m_pkthdr.rcvif = ifp;
2469
2470 #if NBPFILTER > 0
2471 /*
2472 * Handle BPF listeners. Let the BPF user see the packet.
2473 */
2474 if (ifp->if_bpf)
2475 bpf_mtap(ifp->if_bpf, m);
2476 #endif
2477
2478 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2479
2480 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2481 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2482 /*
2483 * Rx transport checksum-offload may also
2484 * have bugs with packets which, when transmitted,
2485 * were `runts' requiring padding.
2486 */
2487 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
		    (/* (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
2489 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
2490 m->m_pkthdr.csum_data =
2491 cur_rx->bge_tcp_udp_csum;
2492 m->m_pkthdr.csum_flags |=
2493 (M_CSUM_TCPv4|M_CSUM_UDPv4|
2494 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
2495 }
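		/*
		 * (M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR indicates csum_data
		 * holds the raw TCP/UDP checksum, computed without the
		 * pseudo-header, which the stack must fold in itself.)
		 */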
2496
2497 /*
2498 * If we received a packet with a vlan tag, pass it
2499 * to vlan_input() instead of ether_input().
2500 */
2501 if (have_tag) {
2502 struct m_tag *mtag;
2503
2504 mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2505 M_NOWAIT);
2506 if (mtag != NULL) {
2507 *(u_int *)(mtag + 1) = vlan_tag;
2508 m_tag_prepend(m, mtag);
2509 have_tag = vlan_tag = 0;
2510 } else {
2511 printf("%s: no mbuf for tag\n", ifp->if_xname);
2512 m_freem(m);
2513 have_tag = vlan_tag = 0;
2514 continue;
2515 }
2516 }
2517 (*ifp->if_input)(ifp, m);
2518 }
2519
2520 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2521 if (stdcnt)
2522 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2523 if (jumbocnt)
2524 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2525 }
2526
2527 void
2528 bge_txeof(sc)
2529 struct bge_softc *sc;
2530 {
2531 struct bge_tx_bd *cur_tx = NULL;
2532 struct ifnet *ifp;
2533 struct txdmamap_pool_entry *dma;
2534 bus_addr_t offset, toff;
2535 bus_size_t tlen;
2536 int tosync;
2537 struct mbuf *m;
2538
2539 ifp = &sc->ethercom.ec_if;
2540
2541 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2542 offsetof(struct bge_ring_data, bge_status_block),
2543 sizeof (struct bge_status_block),
2544 BUS_DMASYNC_POSTREAD);
2545
2546 offset = offsetof(struct bge_ring_data, bge_tx_ring);
2547 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2548 sc->bge_tx_saved_considx;
2549
2550 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2551
2552 if (tosync < 0) {
2553 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2554 sizeof (struct bge_tx_bd);
2555 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2556 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2557 tosync = -tosync;
2558 }
2559
2560 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2561 offset, tosync * sizeof (struct bge_tx_bd),
2562 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2563
2564 /*
2565 * Go through our tx ring and free mbufs for those
2566 * frames that have been sent.
2567 */
2568 while (sc->bge_tx_saved_considx !=
2569 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
		u_int32_t idx = sc->bge_tx_saved_considx;

2573 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2574 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2575 ifp->if_opackets++;
2576 m = sc->bge_cdata.bge_tx_chain[idx];
2577 if (m != NULL) {
2578 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2579 dma = sc->txdma[idx];
2580 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2581 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2582 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2583 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2584 sc->txdma[idx] = NULL;
2585
2586 m_freem(m);
2587 }
2588 sc->bge_txcnt--;
2589 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2590 ifp->if_timer = 0;
2591 }
2592
2593 if (cur_tx != NULL)
2594 ifp->if_flags &= ~IFF_OACTIVE;
2595 }
2596
2597 int
2598 bge_intr(xsc)
2599 void *xsc;
2600 {
2601 struct bge_softc *sc;
2602 struct ifnet *ifp;
2603
2604 sc = xsc;
2605 ifp = &sc->ethercom.ec_if;
2606
2607 #ifdef notdef
2608 /* Avoid this for now -- checking this register is expensive. */
2609 /* Make sure this is really our interrupt. */
2610 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2611 return (0);
2612 #endif
	/* Ack interrupt and stop others from occurring. */
2614 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2615
2616 /*
2617 * Process link state changes.
2618 * Grrr. The link status word in the status block does
2619 * not work correctly on the BCM5700 rev AX and BX chips,
	 * according to all available information. Hence, we have
2621 * to enable MII interrupts in order to properly obtain
2622 * async link changes. Unfortunately, this also means that
2623 * we have to read the MAC status register to detect link
2624 * changes, thereby adding an additional register access to
2625 * the interrupt handler.
2626 */
2627
2628 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
2629 u_int32_t status;
2630
2631 status = CSR_READ_4(sc, BGE_MAC_STS);
2632 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2633 sc->bge_link = 0;
2634 callout_stop(&sc->bge_timeout);
2635 bge_tick(sc);
2636 /* Clear the interrupt */
2637 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2638 BGE_EVTENB_MI_INTERRUPT);
2639 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2640 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2641 BRGPHY_INTRS);
2642 }
2643 } else {
2644 if (sc->bge_rdata->bge_status_block.bge_status &
2645 BGE_STATFLAG_LINKSTATE_CHANGED) {
2646 sc->bge_link = 0;
2647 callout_stop(&sc->bge_timeout);
2648 bge_tick(sc);
2649 /* Clear the interrupt */
2650 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2651 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
2652 BGE_MACSTAT_LINK_CHANGED);
2653 }
2654 }
2655
2656 if (ifp->if_flags & IFF_RUNNING) {
2657 /* Check RX return ring producer/consumer */
2658 bge_rxeof(sc);
2659
2660 /* Check TX ring producer/consumer */
2661 bge_txeof(sc);
2662 }
2663
2664 bge_handle_events(sc);
2665
2666 /* Re-enable interrupts. */
2667 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2668
2669 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2670 bge_start(ifp);
2671
2672 return (1);
2673 }
2674
2675 void
2676 bge_tick(xsc)
2677 void *xsc;
2678 {
2679 struct bge_softc *sc = xsc;
2680 struct mii_data *mii = &sc->bge_mii;
2681 struct ifmedia *ifm = NULL;
2682 struct ifnet *ifp = &sc->ethercom.ec_if;
2683 int s;
2684
2685 s = splnet();
2686
2687 bge_stats_update(sc);
2688 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2689 if (sc->bge_link) {
2690 splx(s);
2691 return;
2692 }
2693
2694 if (sc->bge_tbi) {
2695 ifm = &sc->bge_ifmedia;
2696 if (CSR_READ_4(sc, BGE_MAC_STS) &
2697 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2698 sc->bge_link++;
2699 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2700 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2701 bge_start(ifp);
2702 }
2703 splx(s);
2704 return;
2705 }
2706
2707 mii_tick(mii);
2708
2709 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2710 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2711 sc->bge_link++;
2712 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2713 bge_start(ifp);
2714 }
2715
2716 splx(s);
2717 }
2718
2719 void
2720 bge_stats_update(sc)
2721 struct bge_softc *sc;
2722 {
2723 struct ifnet *ifp = &sc->ethercom.ec_if;
2724 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2725 bus_size_t rstats = BGE_RX_STATS;
2726
2727 #define READ_RSTAT(sc, stats, stat) \
2728 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
2729
2730 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
2731 ifp->if_collisions +=
2732 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
2733 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
2734 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
2735 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
2736 return;
2737 }
2738
2739 #undef READ_RSTAT
2740 #define READ_STAT(sc, stats, stat) \
2741 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
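/*
 * (Each counter in the on-chip statistics block is assumed to be a
 * 64-bit quantity; e.g. READ_STAT(sc, stats,
 * dot3StatsLateCollisions.bge_addr_lo) reads the low 32 bits of that
 * counter through the memory window.)
 */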
2742
2743 ifp->if_collisions +=
2744 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
2745 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2746 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
2747 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
2748 ifp->if_collisions;
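	/*
	 * (The `+= total - if_collisions' form simply replaces the old
	 * value with the hardware's running total of collisions.)
	 */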
2749
2750 #undef READ_STAT
2751
2752 #ifdef notdef
2753 ifp->if_collisions +=
2754 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2755 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2756 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2757 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2758 ifp->if_collisions;
2759 #endif
2760 }
2761
2762 /*
2763 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
2764 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
2765 * but when such padded frames employ the bge IP/TCP checksum offload,
2766 * the hardware checksum assist gives incorrect results (possibly
2767 * from incorporating its own padding into the UDP/TCP checksum; who knows).
2768 * If we pad such runts with zeros, the onboard checksum comes out correct.
2769 */
2770 static __inline int
2771 bge_cksum_pad(struct mbuf *pkt)
2772 {
2773 struct mbuf *last = NULL;
2774 int padlen;
2775
2776 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
2777
	/* If there's only the packet header and we can pad there, use it. */
2779 if (pkt->m_pkthdr.len == pkt->m_len &&
2780 !M_READONLY(pkt) && M_TRAILINGSPACE(pkt) >= padlen) {
2781 last = pkt;
2782 } else {
2783 /*
2784 * Walk packet chain to find last mbuf. We will either
2785 * pad there, or append a new mbuf and pad it
2786 * (thus perhaps avoiding the bcm5700 dma-min bug).
2787 */
		for (last = pkt; last->m_next != NULL; last = last->m_next)
			continue;	/* just walk to the last mbuf */
2791
2792 /* `last' now points to last in chain. */
2793 if (!M_READONLY(last) && M_TRAILINGSPACE(last) >= padlen) {
			/* We can pad here, in-place. */
2795 } else {
2796 /* Allocate new empty mbuf, pad it. Compact later. */
2797 struct mbuf *n;
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return ENOBUFS;	/* no mbuf: cannot pad */
			n->m_len = 0;
2800 last->m_next = n;
2801 last = n;
2802 }
2803 }
2804
2805 #ifdef DEBUG
2806 /*KASSERT(M_WRITABLE(last), ("to-pad mbuf not writeable\n"));*/
2807 KASSERT(M_TRAILINGSPACE(last) >= padlen /*, ("insufficient space to pad\n")*/ );
2808 #endif
2809 /* Now zero the pad area, to avoid the bge cksum-assist bug */
2810 memset(mtod(last, caddr_t) + last->m_len, 0, padlen);
2811 last->m_len += padlen;
2812 pkt->m_pkthdr.len += padlen;
2813 return 0;
2814 }
2815
2816 /*
2817 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
2818 */
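/*
 * (Strategy sketch: for each mbuf shorter than 8 bytes we first try
 * to merge it into trailing space in its predecessor, then try to
 * pull data up from its successor; failing both, we allocate a fresh
 * mbuf, steal enough tail bytes from the predecessor to reach the
 * 8-byte minimum, and splice it into the chain.)
 */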
2819 static __inline int
2820 bge_compact_dma_runt(struct mbuf *pkt)
2821 {
2822 struct mbuf *m, *prev;
2823 int totlen, prevlen;
2824
2825 prev = NULL;
2826 totlen = 0;
2827 prevlen = -1;
2828
	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
		int mlen = m->m_len;
		int shortfall = 8 - mlen;
2832
2833 totlen += mlen;
2834 if (mlen == 0) {
2835 continue;
2836 }
2837 if (mlen >= 8)
2838 continue;
2839
2840 /* If we get here, mbuf data is too small for DMA engine.
2841 * Try to fix by shuffling data to prev or next in chain.
2842 * If that fails, do a compacting deep-copy of the whole chain.
2843 */
2844
2845 /* Internal frag. If fits in prev, copy it there. */
2846 if (prev && !M_READONLY(prev) &&
2847 M_TRAILINGSPACE(prev) >= m->m_len) {
2848 bcopy(m->m_data,
2849 prev->m_data+prev->m_len,
2850 mlen);
2851 prev->m_len += mlen;
2852 m->m_len = 0;
2853 /* XXX stitch chain */
2854 prev->m_next = m_free(m);
2855 m = prev;
2856 continue;
2857 }
2858 else if (m->m_next != NULL && !M_READONLY(m) &&
2859 M_TRAILINGSPACE(m) >= shortfall &&
2860 m->m_next->m_len >= (8 + shortfall)) {
			/* m is writable and its successor has enough data; pull up. */
2862
2863 bcopy(m->m_next->m_data,
2864 m->m_data+m->m_len,
2865 shortfall);
2866 m->m_len += shortfall;
2867 m->m_next->m_len -= shortfall;
2868 m->m_next->m_data += shortfall;
2869 }
		else {
			/* A runt we could not fix up in place, e.g. at the
			 * very end of the packet: borrow data from the tail
			 * of the preceding mbuf and splice in a replacement
			 * mbuf. (The original data is still valid, so we can
			 * do this even if prev is not writable.)
			 */
2876
2877 /* if we'd make prev a runt, just move all of its data. */
2878 #ifdef DEBUG
2879 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
2880 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
2881 #endif
2882 if ((prev->m_len - shortfall) < 8)
2883 shortfall = prev->m_len;
2884
2885 #ifdef notyet /* just do the safe slow thing for now */
2886 if (!M_READONLY(m)) {
				if (M_LEADINGSPACE(m) < shortfall) {
					void *m_dat;
					m_dat = (m->m_flags & M_PKTHDR) ?
					    m->m_pktdat : m->m_dat;
					memmove(m_dat, mtod(m, void *), m->m_len);
2892 m->m_data = m_dat;
2893 }
2894 } else
2895 #endif /* just do the safe slow thing */
2896 {
				struct mbuf *n = NULL;
				int newprevlen = prev->m_len - shortfall;

				MGET(n, M_DONTWAIT, MT_DATA);
2901 if (n == NULL)
2902 return ENOBUFS;
2903 KASSERT(m->m_len + shortfall < MLEN
2904 /*,
2905 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
2906
2907 /* first copy the data we're stealing from prev */
2908 bcopy(prev->m_data + newprevlen, n->m_data, shortfall);
2909
2910 /* update prev->m_len accordingly */
2911 prev->m_len -= shortfall;
2912
2913 /* copy data from runt m */
2914 bcopy(m->m_data, n->m_data + shortfall, m->m_len);
2915
2916 /* n holds what we stole from prev, plus m */
2917 n->m_len = shortfall + m->m_len;
2918
2919 /* stitch n into chain and free m */
2920 n->m_next = m->m_next;
2921 prev->m_next = n;
2922 /* KASSERT(m->m_next == NULL); */
2923 m->m_next = NULL;
2924 m_free(m);
2925 m = n; /* for continuing loop */
2926 }
2927 }
2928 prevlen = m->m_len;
2929 }
2930 return 0;
2931 }
2932
2933 /*
2934 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2935 * pointers to descriptors.
2936 */
2937 int
2938 bge_encap(sc, m_head, txidx)
2939 struct bge_softc *sc;
2940 struct mbuf *m_head;
2941 u_int32_t *txidx;
2942 {
2943 struct bge_tx_bd *f = NULL;
2944 u_int32_t frag, cur, cnt = 0;
2945 u_int16_t csum_flags = 0;
2946 struct txdmamap_pool_entry *dma;
2947 bus_dmamap_t dmamap;
2948 int i = 0;
2949 struct m_tag *mtag;
2950
2951 cur = frag = *txidx;
2952
2953 if (m_head->m_pkthdr.csum_flags) {
2954 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
2955 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2956 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
2957 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2958 }
2959
2960 /*
2961 * If we were asked to do an outboard checksum, and the NIC
2962 * has the bug where it sometimes adds in the Ethernet padding,
2963 * explicitly pad with zeros so the cksum will be correct either way.
2964 * (For now, do this for all chip versions, until newer
2965 * are confirmed to not require the workaround.)
2966 */
2967 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
2968 #ifdef notyet
2969 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
2970 #endif
2971 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
2972 goto check_dma_bug;
2973
2974 if (bge_cksum_pad(m_head) != 0)
2975 return ENOBUFS;
2976
2977 check_dma_bug:
2978 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
2979 goto doit;
2980 /*
2981 * bcm5700 Revision B silicon cannot handle DMA descriptors with
2982 * less than eight bytes. If we encounter a teeny mbuf
2983 * at the end of a chain, we can pad. Otherwise, copy.
2984 */
2985 if (bge_compact_dma_runt(m_head) != 0)
2986 return ENOBUFS;
2987
2988 doit:
2989 dma = SLIST_FIRST(&sc->txdma_list);
2990 if (dma == NULL)
2991 return ENOBUFS;
2992 dmamap = dma->dmamap;
2993
2994 /*
2995 * Start packing the mbufs in this chain into
2996 * the fragment pointers. Stop when we run out
2997 * of fragments or hit the end of the mbuf chain.
2998 */
2999 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3000 BUS_DMA_NOWAIT))
3001 return(ENOBUFS);
3002
3003 mtag = sc->ethercom.ec_nvlans ?
3004 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3005
3006 for (i = 0; i < dmamap->dm_nsegs; i++) {
3007 f = &sc->bge_rdata->bge_tx_ring[frag];
3008 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3009 break;
3010 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3011 f->bge_len = dmamap->dm_segs[i].ds_len;
3012 f->bge_flags = csum_flags;
3013
3014 if (mtag != NULL) {
3015 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3016 f->bge_vlan_tag = *(u_int *)(mtag + 1);
3017 } else {
3018 f->bge_vlan_tag = 0;
3019 }
3020 /*
3021 * Sanity check: avoid coming within 16 descriptors
3022 * of the end of the ring.
3023 */
		if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) {
			/* Unload so the map can be re-loaded on retry. */
			bus_dmamap_unload(sc->bge_dmatag, dmamap);
			return(ENOBUFS);
		}
3026 cur = frag;
3027 BGE_INC(frag, BGE_TX_RING_CNT);
3028 cnt++;
3029 }
3030
	if (i < dmamap->dm_nsegs) {
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return(ENOBUFS);
	}
3033
3034 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
3035 BUS_DMASYNC_PREWRITE);
3036
	if (frag == sc->bge_tx_saved_considx) {
		bus_dmamap_unload(sc->bge_dmatag, dmamap);
		return(ENOBUFS);
	}
3039
3040 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
3041 sc->bge_cdata.bge_tx_chain[cur] = m_head;
3042 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
3043 sc->txdma[cur] = dma;
3044 sc->bge_txcnt += cnt;
3045
3046 *txidx = frag;
3047
3048 return(0);
3049 }
3050
3051 /*
3052 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
3053 * to the mbuf data regions directly in the transmit descriptors.
3054 */
3055 void
3056 bge_start(ifp)
3057 struct ifnet *ifp;
3058 {
3059 struct bge_softc *sc;
3060 struct mbuf *m_head = NULL;
3061 u_int32_t prodidx = 0;
3062 int pkts = 0;
3063
3064 sc = ifp->if_softc;
3065
3066 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
3067 return;
3068
3069 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
3070
3071 while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
3072 IFQ_POLL(&ifp->if_snd, m_head);
3073 if (m_head == NULL)
3074 break;
3075
3076 #if 0
3077 /*
3078 * XXX
3079 * safety overkill. If this is a fragmented packet chain
3080 * with delayed TCP/UDP checksums, then only encapsulate
3081 * it if we have enough descriptors to handle the entire
3082 * chain at once.
3083 * (paranoia -- may not actually be needed)
3084 */
3085 if (m_head->m_flags & M_FIRSTFRAG &&
3086 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
3087 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
3088 m_head->m_pkthdr.csum_data + 16) {
3089 ifp->if_flags |= IFF_OACTIVE;
3090 break;
3091 }
3092 }
3093 #endif
3094
3095 /*
3096 * Pack the data into the transmit ring. If we
3097 * don't have room, set the OACTIVE flag and wait
3098 * for the NIC to drain the ring.
3099 */
3100 if (bge_encap(sc, m_head, &prodidx)) {
3101 ifp->if_flags |= IFF_OACTIVE;
3102 break;
3103 }
3104
3105 /* now we are committed to transmit the packet */
3106 IFQ_DEQUEUE(&ifp->if_snd, m_head);
3107 pkts++;
3108
3109 #if NBPFILTER > 0
3110 /*
3111 * If there's a BPF listener, bounce a copy of this frame
3112 * to him.
3113 */
3114 if (ifp->if_bpf)
3115 bpf_mtap(ifp->if_bpf, m_head);
3116 #endif
3117 }
3118 if (pkts == 0)
3119 return;
3120
3121 /* Transmit */
3122 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3123 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
3124 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
3125
3126 /*
3127 * Set a timeout in case the chip goes out to lunch.
3128 */
3129 ifp->if_timer = 5;
3130 }
3131
3132 int
3133 bge_init(ifp)
3134 struct ifnet *ifp;
3135 {
3136 struct bge_softc *sc = ifp->if_softc;
3137 u_int16_t *m;
3138 int s, error;
3139
3140 s = splnet();
3141
3142 ifp = &sc->ethercom.ec_if;
3143
3144 /* Cancel pending I/O and flush buffers. */
3145 bge_stop(sc);
3146 bge_reset(sc);
3147 bge_chipinit(sc);
3148
3149 /*
3150 * Init the various state machines, ring
3151 * control blocks and firmware.
3152 */
3153 error = bge_blockinit(sc);
3154 if (error != 0) {
3155 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
3156 error);
3157 splx(s);
3158 return error;
3159 }
3160
3163 /* Specify MTU. */
3164 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
3165 ETHER_HDR_LEN + ETHER_CRC_LEN);
3166
3167 /* Load our MAC address. */
3168 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
3169 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
3170 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
3171
3172 /* Enable or disable promiscuous mode as needed. */
3173 if (ifp->if_flags & IFF_PROMISC) {
3174 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3175 } else {
3176 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
3177 }
3178
3179 /* Program multicast filter. */
3180 bge_setmulti(sc);
3181
3182 /* Init RX ring. */
3183 bge_init_rx_ring_std(sc);
3184
3185 /* Init jumbo RX ring. */
3186 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
3187 bge_init_rx_ring_jumbo(sc);
3188
3189 /* Init our RX return ring index */
3190 sc->bge_rx_saved_considx = 0;
3191
3192 /* Init TX ring. */
3193 bge_init_tx_ring(sc);
3194
3195 /* Turn on transmitter */
3196 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
3197
3198 /* Turn on receiver */
3199 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3200
3201 /* Tell firmware we're alive. */
3202 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3203
3204 /* Enable host interrupts. */
3205 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
3206 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3207 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
3208
3209 bge_ifmedia_upd(ifp);
3210
3211 ifp->if_flags |= IFF_RUNNING;
3212 ifp->if_flags &= ~IFF_OACTIVE;
3213
3214 splx(s);
3215
3216 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3217
3218 return 0;
3219 }
3220
3221 /*
3222 * Set media options.
3223 */
3224 int
3225 bge_ifmedia_upd(ifp)
3226 struct ifnet *ifp;
3227 {
3228 struct bge_softc *sc = ifp->if_softc;
3229 struct mii_data *mii = &sc->bge_mii;
3230 struct ifmedia *ifm = &sc->bge_ifmedia;
3231
3232 /* If this is a 1000baseX NIC, enable the TBI port. */
3233 if (sc->bge_tbi) {
3234 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
3235 return(EINVAL);
3236 switch(IFM_SUBTYPE(ifm->ifm_media)) {
3237 case IFM_AUTO:
3238 break;
3239 case IFM_1000_SX:
3240 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
3241 BGE_CLRBIT(sc, BGE_MAC_MODE,
3242 BGE_MACMODE_HALF_DUPLEX);
3243 } else {
3244 BGE_SETBIT(sc, BGE_MAC_MODE,
3245 BGE_MACMODE_HALF_DUPLEX);
3246 }
3247 break;
3248 default:
3249 return(EINVAL);
3250 }
3251 return(0);
3252 }
3253
3254 sc->bge_link = 0;
3255 mii_mediachg(mii);
3256
3257 return(0);
3258 }
3259
3260 /*
3261 * Report current media status.
3262 */
3263 void
3264 bge_ifmedia_sts(ifp, ifmr)
3265 struct ifnet *ifp;
3266 struct ifmediareq *ifmr;
3267 {
3268 struct bge_softc *sc = ifp->if_softc;
3269 struct mii_data *mii = &sc->bge_mii;
3270
3271 if (sc->bge_tbi) {
3272 ifmr->ifm_status = IFM_AVALID;
3273 ifmr->ifm_active = IFM_ETHER;
3274 if (CSR_READ_4(sc, BGE_MAC_STS) &
3275 BGE_MACSTAT_TBI_PCS_SYNCHED)
3276 ifmr->ifm_status |= IFM_ACTIVE;
3277 ifmr->ifm_active |= IFM_1000_SX;
3278 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
3279 ifmr->ifm_active |= IFM_HDX;
3280 else
3281 ifmr->ifm_active |= IFM_FDX;
3282 return;
3283 }
3284
3285 mii_pollstat(mii);
3286 ifmr->ifm_active = mii->mii_media_active;
3287 ifmr->ifm_status = mii->mii_media_status;
3288 }
3289
3290 int
3291 bge_ioctl(ifp, command, data)
3292 struct ifnet *ifp;
3293 u_long command;
3294 caddr_t data;
3295 {
3296 struct bge_softc *sc = ifp->if_softc;
3297 struct ifreq *ifr = (struct ifreq *) data;
3298 int s, error = 0;
3299 struct mii_data *mii;
3300
3301 s = splnet();
3302
3303 switch(command) {
3304 case SIOCSIFFLAGS:
3305 if (ifp->if_flags & IFF_UP) {
3306 /*
3307 * If only the state of the PROMISC flag changed,
3308 * then just use the 'set promisc mode' command
3309 * instead of reinitializing the entire NIC. Doing
3310 * a full re-init means reloading the firmware and
3311 * waiting for it to start up, which may take a
3312 * second or two.
3313 */
3314 if (ifp->if_flags & IFF_RUNNING &&
3315 ifp->if_flags & IFF_PROMISC &&
3316 !(sc->bge_if_flags & IFF_PROMISC)) {
3317 BGE_SETBIT(sc, BGE_RX_MODE,
3318 BGE_RXMODE_RX_PROMISC);
3319 } else if (ifp->if_flags & IFF_RUNNING &&
3320 !(ifp->if_flags & IFF_PROMISC) &&
3321 sc->bge_if_flags & IFF_PROMISC) {
3322 BGE_CLRBIT(sc, BGE_RX_MODE,
3323 BGE_RXMODE_RX_PROMISC);
3324 } else
3325 bge_init(ifp);
3326 } else {
3327 if (ifp->if_flags & IFF_RUNNING) {
3328 bge_stop(sc);
3329 }
3330 }
3331 sc->bge_if_flags = ifp->if_flags;
3332 error = 0;
3333 break;
3334 case SIOCSIFMEDIA:
3335 case SIOCGIFMEDIA:
3336 if (sc->bge_tbi) {
3337 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
3338 command);
3339 } else {
3340 mii = &sc->bge_mii;
3341 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
3342 command);
		}
		break;
3346 default:
3347 error = ether_ioctl(ifp, command, data);
3348 if (error == ENETRESET) {
3349 bge_setmulti(sc);
3350 error = 0;
3351 }
3352 break;
3353 }
3354
3355 splx(s);
3356
3357 return(error);
3358 }
3359
3360 void
3361 bge_watchdog(ifp)
3362 struct ifnet *ifp;
3363 {
3364 struct bge_softc *sc;
3365
3366 sc = ifp->if_softc;
3367
3368 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
3369
3370 ifp->if_flags &= ~IFF_RUNNING;
3371 bge_init(ifp);
3372
3373 ifp->if_oerrors++;
3374 }
3375
3376 static void
3377 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
3378 {
3379 int i;
3380
3381 BGE_CLRBIT(sc, reg, bit);
3382
3383 for (i = 0; i < BGE_TIMEOUT; i++) {
3384 if ((CSR_READ_4(sc, reg) & bit) == 0)
3385 return;
3386 delay(100);
3387 }
3388
3389 printf("%s: block failed to stop: reg 0x%lx, bit 0x%08x\n",
3390 sc->bge_dev.dv_xname, (u_long) reg, bit);
3391 }
3392
3393 /*
3394 * Stop the adapter and free any mbufs allocated to the
3395 * RX and TX lists.
3396 */
3397 void
3398 bge_stop(sc)
3399 struct bge_softc *sc;
3400 {
3401 struct ifnet *ifp = &sc->ethercom.ec_if;
3402
3403 callout_stop(&sc->bge_timeout);
3404
3405 /*
3406 * Disable all of the receiver blocks
3407 */
3408 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
3409 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
3410 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
3411 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3412 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
3413 }
3414 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
3415 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
3416 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
3417
3418 /*
3419 * Disable all of the transmit blocks
3420 */
3421 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
3422 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
3423 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
3424 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
3425 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
3426 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3427 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
3428 }
3429 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
3430
3431 /*
3432 * Shut down all of the memory managers and related
3433 * state machines.
3434 */
3435 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
3436 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
3437 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3438 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
3439 }
3440
3441 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
3442 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
3443
3444 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
3445 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
3446 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
3447 }
3448
3449 /* Disable host interrupts. */
3450 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
3451 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
3452
3453 /*
3454 * Tell firmware we're shutting down.
3455 */
3456 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
3457
3458 /* Free the RX lists. */
3459 bge_free_rx_ring_std(sc);
3460
3461 /* Free jumbo RX list. */
3462 bge_free_rx_ring_jumbo(sc);
3463
3464 /* Free TX buffers. */
3465 bge_free_tx_ring(sc);
3466
3467 /*
3468 * Isolate/power down the PHY.
3469 */
3470 if (!sc->bge_tbi)
3471 mii_down(&sc->bge_mii);
3472
3473 sc->bge_link = 0;
3474
3475 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
3476
3477 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3478 }
3479
3480 /*
3481 * Stop all chip I/O so that the kernel's probe routines don't
3482 * get confused by errant DMAs when rebooting.
3483 */
3484 void
3485 bge_shutdown(xsc)
3486 void *xsc;
3487 {
3488 struct bge_softc *sc = (struct bge_softc *)xsc;
3489
3490 bge_stop(sc);
3491 bge_reset(sc);
3492 }
3493