/*	$NetBSD: if_bge.c,v 1.10 2002/06/28 18:39:45 fvdl Exp $	*/

/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2001
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
 */

/*
 * Broadcom BCM570x family gigabit ethernet driver for FreeBSD.
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Engineer, Wind River Systems
 */

/*
 * The Broadcom BCM5700 is based on technology originally developed by
 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
 * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
 * frames, highly configurable RX filtering, and 16 RX and TX queues
 * (which, along with RX filter rules, can be used for QOS applications).
 * Other features, such as TCP segmentation, may be available as part
 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
 * firmware images can be stored in hardware and need not be compiled
 * into the driver.
 *
 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
 * function in a 32-bit/64-bit 33/66MHz bus, or a 64-bit/133MHz bus.
 *
 * The BCM5701 is a single-chip solution incorporating both the BCM5700
 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
 * does not support external SSRAM.
 *
 * Broadcom also produces a variation of the BCM5700 under the "Altima"
 * brand name, which is functionally similar but lacks PCI-X support.
 *
 * Without external SSRAM, you can only have at most 4 TX rings,
 * and the use of the mini RX ring is disabled. This seems to imply
 * that these features are simply not available on the BCM5701. As a
 * result, this driver does not implement any support for the mini RX
 * ring.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/if_bgereg.h>

#include <uvm/uvm_extern.h>

/* #define BGE_CHECKSUM */

int bge_probe(struct device *, struct cfdata *, void *);
void bge_attach(struct device *, struct device *, void *);
void bge_release_resources(struct bge_softc *);
void bge_txeof(struct bge_softc *);
void bge_rxeof(struct bge_softc *);

void bge_tick(void *);
void bge_stats_update(struct bge_softc *);
int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);

int bge_intr(void *);
void bge_start(struct ifnet *);
int bge_ioctl(struct ifnet *, u_long, caddr_t);
int bge_init(struct ifnet *);
void bge_stop(struct bge_softc *);
void bge_watchdog(struct ifnet *);
void bge_shutdown(void *);
int bge_ifmedia_upd(struct ifnet *);
void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);

u_int8_t bge_eeprom_getbyte(struct bge_softc *, int, u_int8_t *);
int bge_read_eeprom(struct bge_softc *, caddr_t, int, int);

u_int32_t bge_crc(struct bge_softc *, caddr_t);
void bge_setmulti(struct bge_softc *);

void bge_handle_events(struct bge_softc *);
int bge_alloc_jumbo_mem(struct bge_softc *);
void bge_free_jumbo_mem(struct bge_softc *);
void *bge_jalloc(struct bge_softc *);
void bge_jfree(struct mbuf *, caddr_t, u_int, void *);
int bge_newbuf_std(struct bge_softc *, int, struct mbuf *, bus_dmamap_t);
int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
int bge_init_rx_ring_std(struct bge_softc *);
void bge_free_rx_ring_std(struct bge_softc *);
int bge_init_rx_ring_jumbo(struct bge_softc *);
void bge_free_rx_ring_jumbo(struct bge_softc *);
void bge_free_tx_ring(struct bge_softc *);
int bge_init_tx_ring(struct bge_softc *);

int bge_chipinit(struct bge_softc *);
int bge_blockinit(struct bge_softc *);

#ifdef notdef
u_int8_t bge_vpd_readbyte(struct bge_softc *, int);
void bge_vpd_read_res(struct bge_softc *, struct vpd_res *, int);
void bge_vpd_read(struct bge_softc *);
#endif

u_int32_t bge_readmem_ind(struct bge_softc *, int);
void bge_writemem_ind(struct bge_softc *, int, int);
#ifdef notdef
u_int32_t bge_readreg_ind(struct bge_softc *, int);
#endif
void bge_writereg_ind(struct bge_softc *, int, int);

int bge_miibus_readreg(struct device *, int, int);
void bge_miibus_writereg(struct device *, int, int, int);
void bge_miibus_statchg(struct device *);

void bge_reset(struct bge_softc *);
void bge_phy_hack(struct bge_softc *);

void bge_dump_status(struct bge_softc *);
void bge_dump_rxbd(struct bge_rx_bd *);

#define BGE_DEBUG
#ifdef BGE_DEBUG
#define DPRINTF(x)	if (bgedebug) printf x
#define DPRINTFN(n,x)	if (bgedebug >= (n)) printf x
int	bgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif
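
/*
 * Debugging aid: set "bgedebug" to a non-zero level (e.g. from ddb)
 * and the DPRINTFN() calls sprinkled through the driver fire at or
 * below that level, for instance the attach-path trace:
 *
 *	DPRINTFN(5, ("pci_intr_map\n"));
 */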

struct cfattach bge_ca = {
	sizeof(struct bge_softc), bge_probe, bge_attach
};

u_int32_t
bge_readmem_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args *pa = &(sc->bge_pa);
	pcireg_t val;

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA);
	return val;
}

void
bge_writemem_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MEMWIN_DATA, val);
}
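
/*
 * Illustration: bge_reset() below uses this config-space window to
 * post the anti-PXE magic number into NIC-local memory at 0xB50 and
 * then polls the same location for the firmware's reply:
 *
 *	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
 *	val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
 */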

#ifdef notdef
u_int32_t
bge_readreg_ind(sc, off)
	struct bge_softc *sc;
	int off;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	return(pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA));
}
#endif

void
bge_writereg_ind(sc, off, val)
	struct bge_softc *sc;
	int off, val;
{
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_BASEADDR, off);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_REG_DATA, val);
}

#ifdef notdef
u_int8_t
bge_vpd_readbyte(sc, addr)
	struct bge_softc *sc;
	int addr;
{
	int i;
	u_int32_t val;
	struct pci_attach_args *pa = &(sc->bge_pa);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR, addr);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_ADDR) &
		    BGE_VPD_FLAG)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: VPD read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_VPD_DATA);

	return((val >> ((addr % 4) * 8)) & 0xFF);
}

void
bge_vpd_read_res(sc, res, addr)
	struct bge_softc *sc;
	struct vpd_res *res;
	int addr;
{
	int i;
	u_int8_t *ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = bge_vpd_readbyte(sc, i + addr);
}

void
bge_vpd_read(sc)
	struct bge_softc *sc;
{
	int pos = 0, i;
	struct vpd_res res;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);
	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
	sc->bge_vpd_prodname = NULL;
	sc->bge_vpd_readonly = NULL;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_ID) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_prodname == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
	sc->bge_vpd_prodname[i] = '\0';
	pos += i;

	bge_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("%s: bad VPD resource id: expected %x got %x\n",
		    sc->bge_dev.dv_xname, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->bge_vpd_readonly == NULL)
		panic("bge_vpd_read");
	for (i = 0; i < res.vr_len; i++)
		sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
}
#endif

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
u_int8_t
bge_eeprom_getbyte(sc, addr, dest)
	struct bge_softc *sc;
	int addr;
	u_int8_t *dest;
{
	int i;
	u_int32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		printf("%s: eeprom read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
int
bge_read_eeprom(sc, dest, off, cnt)
	struct bge_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int err = 0, i;
	u_int8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		err = bge_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}
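
/*
 * Usage sketch (mirroring what bge_attach() does below): pull the
 * factory station address out of the EEPROM.
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *
 *	if (bge_read_eeprom(sc, (caddr_t)eaddr,
 *	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN))
 *		... bail out, the address could not be read ...
 */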

int
bge_miibus_readreg(dev, phy, reg)
	struct device *dev;
	int phy, reg;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct ifnet *ifp;
	u_int32_t val;
	int i;

	ifp = &sc->ethercom.ec_if;

	if (sc->bge_asicrev == BGE_ASICREV_BCM5701_B5 && phy != 1)
		return(0);

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->bge_dev.dv_xname);
		return(0);
	}

	val = CSR_READ_4(sc, BGE_MI_COMM);

	if (val & BGE_MICOMM_READFAIL)
		return(0);

	return(val & 0xFFFF);
}
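
/*
 * Note: BGE_MI_COMM packs the opcode, PHY address and register number
 * into a single command word (BGE_MICMD_*, BGE_MIPHY(), BGE_MIREG()),
 * and BGE_MICOMM_BUSY doubles as the completion flag polled above.
 */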

void
bge_miibus_writereg(dev, phy, reg, val)
	struct device *dev;
	int phy, reg, val;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	int i;

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
	    BGE_MIPHY(phy)|BGE_MIREG(reg)|val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY))
			break;
		delay(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: PHY write timed out\n", sc->bge_dev.dv_xname);
	}
}

void
bge_miibus_statchg(dev)
	struct device *dev;
{
	struct bge_softc *sc = (struct bge_softc *)dev;
	struct mii_data *mii = &sc->bge_mii;

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	} else {
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	}

	bge_phy_hack(sc);
}

/*
 * Handle events that have triggered interrupts.
 */
void
bge_handle_events(sc)
	struct bge_softc *sc;
{

	return;
}

/*
 * Memory management for jumbo frames.
 */

int
bge_alloc_jumbo_mem(sc)
	struct bge_softc *sc;
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct bge_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)BGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
	    kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->bge_dev.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->bge_cdata.bge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));

	SLIST_INIT(&sc->bge_jfree_listhead);
	SLIST_INIT(&sc->bge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->bge_cdata.bge_jumbo_buf;
	for (i = 0; i < BGE_JSLOTS; i++) {
		sc->bge_cdata.bge_jslots[i] = ptr;
		ptr += BGE_JLEN;
		entry = malloc(sizeof(struct bge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->bge_dev.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
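		/*
		 * Unwind in the reverse order of setup; each case
		 * deliberately falls through to the one below it.
		 */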
		case 4:
			bus_dmamap_unload(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
		case 1:
			bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
void *
bge_jalloc(sc)
	struct bge_softc *sc;
{
	struct bge_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->bge_jfree_listhead);

	if (entry == NULL) {
		printf("%s: no free jumbo buffers\n", sc->bge_dev.dv_xname);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
	return(sc->bge_cdata.bge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
void
bge_jfree(m, buf, size, arg)
	struct mbuf *m;
	caddr_t buf;
	u_int size;
	void *arg;
{
	struct bge_jpool_entry *entry;
	struct bge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct bge_softc *)arg;

	if (sc == NULL)
		panic("bge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */

	i = ((caddr_t)buf
	     - (caddr_t)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;

	if ((i < 0) || (i >= BGE_JSLOTS))
		panic("bge_jfree: asked to free buffer that we don't manage!");

	s = splvm();
	entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
	if (entry == NULL)
		panic("bge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);

	if (__predict_true(m != NULL))
		pool_cache_put(&mbpool_cache, m);
	splx(s);
}
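
/*
 * Note: bge_jfree() is the external-storage free routine handed to
 * MEXTADD() in bge_newbuf_jumbo() below, so it runs whenever the
 * stack releases an mbuf that borrowed one of our jumbo slots.
 */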

/*
 * Initialize a standard receive ring descriptor.
 */
int
bge_newbuf_std(sc, i, m, dmamap)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
	bus_dmamap_t dmamap;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	int error;

	if (dmamap == NULL) {
		error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
		if (error != 0)
			return error;
	}

	sc->bge_cdata.bge_rx_std_map[i] = dmamap;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_adj(m_new, ETHER_ALIGN);

		if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
		    BUS_DMA_READ|BUS_DMA_NOWAIT))
			return(ENOBUFS);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_adj(m_new, ETHER_ALIGN);
	}

	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_rdata->bge_rx_std_ring[i];
	bge_set_hostaddr(&r->bge_addr,
	    dmamap->dm_segs[0].ds_addr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_std_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
int
bge_newbuf_jumbo(sc, i, m)
	struct bge_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = bge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("%s: jumbo allocation failed "
			    "-- packet dropped!\n", sc->bge_dev.dv_xname);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
		    bge_jfree, sc);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
	bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
	r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
		i * sizeof (struct bge_rx_bd),
	    sizeof (struct bge_rx_bd),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
int
bge_init_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (sc->bge_flags & BGE_RXRING_VALID)
		return 0;

	for (i = 0; i < BGE_SSLOTS; i++) {
		if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_std = i - 1;
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);

	sc->bge_flags |= BGE_RXRING_VALID;

	return(0);
}

void
bge_free_rx_ring_std(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_RXRING_VALID))
		return;

	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			bus_dmamap_destroy(sc->bge_dmatag,
			    sc->bge_cdata.bge_rx_std_map[i]);
		}
		memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_RXRING_VALID;
}

int
bge_init_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	sc->bge_jumbo = i - 1;

	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	rcbo = (struct bge_rcb_opaque *)rcb;
	rcb->bge_flags = 0;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);

	return(0);
}

void
bge_free_rx_ring_jumbo(sc)
	struct bge_softc *sc;
{
	int i;

	if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
		return;

	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
			sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
		}
		memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
		    sizeof(struct bge_rx_bd));
	}

	sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
}

void
bge_free_tx_ring(sc)
	struct bge_softc *sc;
{
	int i, freed;
	struct txdmamap_pool_entry *dma;

	if (!(sc->bge_flags & BGE_TXRING_VALID))
		return;

	freed = 0;

	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
			freed++;
			m_freem(sc->bge_cdata.bge_tx_chain[i]);
			sc->bge_cdata.bge_tx_chain[i] = NULL;
			SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
			    link);
			sc->txdma[i] = 0;
		}
		memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
		    sizeof(struct bge_tx_bd));
	}

	while ((dma = SLIST_FIRST(&sc->txdma_list))) {
		SLIST_REMOVE_HEAD(&sc->txdma_list, link);
		bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}

	sc->bge_flags &= ~BGE_TXRING_VALID;
}

int
bge_init_tx_ring(sc)
	struct bge_softc *sc;
{
	int i;
	bus_dmamap_t dmamap;
	struct txdmamap_pool_entry *dma;

	if (sc->bge_flags & BGE_TXRING_VALID)
		return 0;

	sc->bge_txcnt = 0;
	sc->bge_tx_saved_considx = 0;
	CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);

	SLIST_INIT(&sc->txdma_list);
	for (i = 0; i < BGE_RSLOTS; i++) {
		if (bus_dmamap_create(sc->bge_dmatag, ETHER_MAX_LEN_JUMBO,
		    BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
		    &dmamap))
			return(ENOBUFS);
		if (dmamap == NULL)
			panic("dmamap NULL in bge_init_tx_ring");
		dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
		if (dma == NULL) {
			printf("%s: can't alloc txdmamap_pool_entry\n",
			    sc->bge_dev.dv_xname);
			bus_dmamap_destroy(sc->bge_dmatag, dmamap);
			return (ENOMEM);
		}
		dma->dmamap = dmamap;
		SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
	}

	sc->bge_flags |= BGE_TXRING_VALID;

	return(0);
}
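
/*
 * The maps created above sit on sc->txdma_list until the transmit
 * path (bge_encap()) pairs one with an outgoing mbuf chain via
 * sc->txdma[]; bge_free_tx_ring() pushes them back onto the list
 * and finally destroys them.
 */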

#define BGE_POLY	0xEDB88320

u_int32_t
bge_crc(sc, addr)
	struct bge_softc *sc;
	caddr_t addr;
{
	u_int32_t idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? BGE_POLY : 0);
	}

	return(crc & 0x7F);
}
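
/*
 * Sketch of how the return value is consumed (see bge_setmulti()
 * below): the low 7 bits index a 128-bit hash table spread across
 * the four BGE_MAR0..BGE_MAR3 filter registers:
 *
 *	h = bge_crc(sc, LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
 *	hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 */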

void
bge_setmulti(sc)
	struct bge_softc *sc;
{
	struct ethercom *ac = &sc->ethercom;
	struct ifnet *ifp = &ac->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[4] = { 0, 0, 0, 0 };
	u_int32_t h;
	int i;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
		return;
	}

	/* First, zot all the existing filters. */
	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);

	/* Now program new ones. */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		h = bge_crc(sc, LLADDR((struct sockaddr_dl *)enm->enm_addrlo));
		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
		ETHER_NEXT_MULTI(step, enm);
	}

	for (i = 0; i < 4; i++)
		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
}

int bge_swapbits[] = {
	0,
	BGE_MODECTL_BYTESWAP_DATA,
	BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,
	BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_WORDSWAP_NONFRAME,

	BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
	    BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
};

int bge_swapindex = 0;

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
int
bge_chipinit(sc)
	struct bge_softc *sc;
{
	u_int32_t cachesize;
	int i;
	struct pci_attach_args *pa = &(sc->bge_pa);

	/* Set endianness before we access any non-PCI registers. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_INIT);

	/*
	 * Check the 'ROM failed' bit on the RX CPU to see if
	 * self-tests passed.
	 */
	if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
		printf("%s: RX CPU self-diagnostics failed!\n",
		    sc->bge_dev.dv_xname);
		return(ENODEV);
	}

	/* Clear the MAC control register */
	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * Clear the MAC statistics block in the NIC's
	 * internal memory.
	 */
	for (i = BGE_STATS_BLOCK;
	    i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	for (i = BGE_STATUS_BLOCK;
	    i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
		BGE_MEMWIN_WRITE(pa->pa_pc, pa->pa_tag, i, 0);

	/* Set up the PCI DMA control register. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
	    BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD|0x0F);

	/*
	 * Set up general mode register.
	 */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
	    BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
	    BGE_MODECTL_NO_RX_CRC|BGE_MODECTL_TX_NO_PHDR_CSUM|
	    BGE_MODECTL_RX_NO_PHDR_CSUM);

	/* Get cache line size. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);

	/*
	 * Avoid violating PCI spec on certain chip revs.
	 */
	if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD) &
	    PCIM_CMD_MWIEN) {
		switch(cachesize) {
		case 1:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_16BYTES);
			break;
		case 2:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_32BYTES);
			break;
		case 4:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_64BYTES);
			break;
		case 8:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_128BYTES);
			break;
		case 16:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_256BYTES);
			break;
		case 32:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_512BYTES);
			break;
		case 64:
			PCI_SETBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_DMA_RW_CTL,
			    BGE_PCI_WRITE_BNDRY_1024BYTES);
			break;
		default:
			/* Disable PCI memory write and invalidate. */
#if 0
			if (bootverbose)
				printf("%s: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->bge_dev.dv_xname, cachesize);
#endif
			PCI_CLRBIT(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD,
			    PCIM_CMD_MWIEN);
			break;
		}
	}

#ifdef __brokenalpha__
	/*
	 * Must ensure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx.
	 */
	PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
#endif

	/* Set the timer prescaler (always 66MHz) */
	CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);

	return(0);
}

int
bge_blockinit(sc)
	struct bge_softc *sc;
{
	struct bge_rcb *rcb;
	struct bge_rcb_opaque *rcbo;
	bus_size_t rcb_addr;
	int i;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	bge_hostaddr taddr;

	/*
	 * Initialize the memory window pointer register so that
	 * we can access the first 32K of internal NIC RAM. This will
	 * allow us to set up the TX send ring RCBs and the RX return
	 * ring RCBs, plus other things which live in NIC memory.
	 */

	pci_conf_write(sc->bge_pa.pa_pc, sc->bge_pa.pa_tag,
	    BGE_PCI_MEMWIN_BASEADDR, 0);

	/* Configure mbuf memory pool */
	if (sc->bge_extram) {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	} else {
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1);
		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
	}

	/* Configure DMA resource pool */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);

	/* Configure mbuf pool watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
	CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);

	/* Configure DMA resource watermarks */
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
	CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);

	/* Enable buffer manager */
	CSR_WRITE_4(sc, BGE_BMAN_MODE,
	    BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);

	/* Poll for buffer manager start indication */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: buffer manager failed to start\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Enable flow-through queues */
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
	CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);

	/* Wait until queue initialization is complete */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: flow-through queue init failed\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Initialize the standard RX ring control block */
	rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_STD_RX_RINGS;
	rcb->bge_flags = 0;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcbo->bge_reg3);

	/*
	 * Initialize the jumbo RX ring control block
	 * We set the 'ring disabled' bit in the flags
	 * field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
	bge_set_hostaddr(&rcb->bge_hostaddr,
	    BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
	rcb->bge_max_len = BGE_MAX_FRAMELEN;
	if (sc->bge_extram)
		rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
	else
		rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;

	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcbo->bge_reg0);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcbo->bge_reg1);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);
	CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcbo->bge_reg3);

	/* Set up dummy disabled mini ring RCB */
	rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
	rcb->bge_flags = BGE_RCB_FLAG_RING_DISABLED;
	rcbo = (struct bge_rcb_opaque *)rcb;
	CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcbo->bge_reg2);

	bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
	    offsetof(struct bge_ring_data, bge_info), sizeof (struct bge_gib),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Set the BD ring replenish thresholds. The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 */
	CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8);
	CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);

	/*
	 * Disable all unused send rings by setting the 'ring disabled'
	 * bit in the flags field of all the TX send ring control blocks.
	 * These are located in NIC memory.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Configure TX RCB 0 (we use only the first ring) */
	rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
	    BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_TX_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Disable all unused RX return rings */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
		RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
		RCB_WRITE_2(sc, rcb_addr, bge_flags,
		    BGE_RCB_FLAG_RING_DISABLED);
		RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
		RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
		CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO +
		    (i * (sizeof(u_int64_t))), 0);
		rcb_addr += sizeof(struct bge_rcb);
	}

	/* Initialize RX ring indexes */
	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0);

	/*
	 * Set up RX return ring 0
	 * Note that the NIC address for RX return rings is 0x00000000.
	 * The return rings live entirely within the host, so the
	 * nicaddr field in the RCB isn't used.
	 */
	rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
	RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
	RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
	RCB_WRITE_2(sc, rcb_addr, bge_max_len, BGE_RETURN_RING_CNT);
	RCB_WRITE_2(sc, rcb_addr, bge_flags, 0);

	/* Set random backoff seed for TX */
	CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
	    LLADDR(ifp->if_sadl)[0] + LLADDR(ifp->if_sadl)[1] +
	    LLADDR(ifp->if_sadl)[2] + LLADDR(ifp->if_sadl)[3] +
	    LLADDR(ifp->if_sadl)[4] + LLADDR(ifp->if_sadl)[5] +
	    BGE_TX_BACKOFF_SEED_MASK);

	/* Set inter-packet gap */
	CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);

	/*
	 * Specify which ring to use for packets that don't match
	 * any RX rules.
	 */
	CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);

	/*
	 * Configure number of RX lists. One interrupt distribution
	 * list, sixteen active lists, one bad frames class.
	 */
	CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);

	/* Initialize RX list placement stats mask. */
	CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);

	/* Disable host coalescing until we get it set up */
	CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);

	/* Poll to make sure it's shut down. */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("%s: host coalescing engine failed to idle\n",
		    sc->bge_dev.dv_xname);
		return(ENXIO);
	}

	/* Set up host coalescing defaults */
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
	CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);

	/* Set up address of statistics block */
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
	CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);

	/* Set up address of status block */
	bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
	sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;

	/* Turn on host coalescing state machine */
	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);

	/* Turn on RX BD completion state machine and enable attentions */
	CSR_WRITE_4(sc, BGE_RBDC_MODE,
	    BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);

	/* Turn on RX list placement state machine */
	CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);

	/* Turn on RX list selector state machine. */
	CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);

	/* Turn on DMA, clear stats */
	CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
	    BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
	    BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
	    BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
	    (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));

	/* Set misc. local control, enable interrupts on attentions */
	CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN);

#ifdef notdef
	/* Assert GPIO pins for PHY reset */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
	    BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
	    BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
#endif

	/* Turn on DMA completion state machine */
	CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);

	/* Turn on write DMA state machine */
	CSR_WRITE_4(sc, BGE_WDMA_MODE,
	    BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS);

	/* Turn on read DMA state machine */
	CSR_WRITE_4(sc, BGE_RDMA_MODE,
	    BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS);

	/* Turn on RX data completion state machine */
	CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);

	/* Turn on RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);

	/* Turn on RX data and RX BD initiator state machine */
	CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);

	/* Turn on Mbuf cluster free state machine */
	CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);

	/* Turn on send BD completion state machine */
	CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);

	/* Turn on send data completion state machine */
	CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);

	/* Turn on send data initiator state machine */
	CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);

	/* Turn on send BD initiator state machine */
	CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);

	/* Turn on send BD selector state machine */
	CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);

	CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
	CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
	    BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);

	/* init LED register */
	CSR_WRITE_4(sc, BGE_MAC_LED_CTL, 0x00000000);

	/* ack/clear link change events */
	CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
	    BGE_MACSTAT_CFG_CHANGED);
	CSR_WRITE_4(sc, BGE_MI_STS, 0);

	/* Enable PHY auto polling (for MII/GMII only) */
	if (sc->bge_tbi) {
		CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
	} else {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
		if (BGE_IS_5700_Ax_Bx(sc->bge_asicrev))
			CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
			    BGE_EVTENB_MI_INTERRUPT);
	}

	/* Enable link state change attentions. */
	BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);

	return(0);
}

static const struct bge_product {
	pci_vendor_id_t		bp_vendor;
	pci_product_id_t	bp_product;
	const char		*bp_name;
} bge_products[] = {
	/*
	 * The BCM5700 documentation seems to indicate that the hardware
	 * still has the Alteon vendor ID burned into it, though it
	 * should always be overridden by the value in the EEPROM. We'll
	 * check for it anyway.
	 */
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTEON,
	  PCI_PRODUCT_ALTEON_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC1000,
	  "Altima AC1000 Gigabit Ethernet" },
	{ PCI_VENDOR_ALTIMA,
	  PCI_PRODUCT_ALTIMA_AC9100,
	  "Altima AC9100 Gigabit Ethernet" },

	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5700,
	  "Broadcom BCM5700 Gigabit Ethernet" },
	{ PCI_VENDOR_BROADCOM,
	  PCI_PRODUCT_BROADCOM_BCM5701,
	  "Broadcom BCM5701 Gigabit Ethernet" },

	{ PCI_VENDOR_SCHNEIDERKOCH,
	  PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
	  "SysKonnect SK-9Dx1 Gigabit Ethernet" },

	{ PCI_VENDOR_3COM,
	  PCI_PRODUCT_3COM_3C996,
	  "3Com 3c996 Gigabit Ethernet" },

	{ 0,
	  0,
	  NULL },
};

static const struct bge_product *
bge_lookup(const struct pci_attach_args *pa)
{
	const struct bge_product *bp;

	for (bp = bge_products; bp->bp_name != NULL; bp++) {
		if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == bp->bp_product)
			return (bp);
	}

	return (NULL);
}

/*
 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note
 * that since the Broadcom controller contains VPD support, we
 * can get the device name string from the controller itself instead
 * of the compiled-in string. This is a little slow, but it guarantees
 * we'll always announce the right product name.
 */
int
bge_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (bge_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
bge_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct bge_softc *sc = (struct bge_softc *)self;
	struct pci_attach_args *pa = aux;
	const struct bge_product *bp;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_dma_segment_t seg;
	int rseg;
	u_int32_t hwcfg = 0;
	u_int32_t command;
	struct ifnet *ifp;
	caddr_t kva;
	u_char eaddr[ETHER_ADDR_LEN];
	pcireg_t memtype;
	bus_addr_t memaddr;
	bus_size_t memsize;

	bp = bge_lookup(pa);
	KASSERT(bp != NULL);

	sc->bge_pa = *pa;

	printf(": %s, rev. 0x%02x\n", bp->bp_name, PCI_REVISION(pa->pa_class));

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf("%s: failed to enable memory mapping!\n",
		    sc->bge_dev.dv_xname);
		return;
	}

	DPRINTFN(5, ("pci_mem_find\n"));
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BGE_PCI_BAR0);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, BGE_PCI_BAR0,
		    memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
		    &memaddr, &memsize) == 0)
			break;
	default:
		printf("%s: can't find mem space\n",
		    sc->bge_dev.dv_xname);
		return;
	}

	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf("%s: couldn't map interrupt\n",
		    sc->bge_dev.dv_xname);
		return;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);

	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);

	if (sc->bge_intrhand == NULL) {
		printf("%s: couldn't establish interrupt",
		    sc->bge_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->bge_dev.dv_xname, intrstr);

	/* Try to reset the chip. */
	DPRINTFN(5, ("bge_reset\n"));
	bge_reset(sc);

	if (bge_chipinit(sc)) {
		printf("%s: chip initialization failed\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * Get station address from the EEPROM.
	 */
	if (bge_read_eeprom(sc, (caddr_t)eaddr,
	    BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("%s: failed to read station address\n",
		    sc->bge_dev.dv_xname);
		bge_release_resources(sc);
		return;
	}

	/*
	 * A Broadcom chip was detected. Inform the world.
	 */
	printf("%s: Ethernet address %s\n", sc->bge_dev.dv_xname,
	    ether_sprintf(eaddr));

	/* Allocate the general information block and ring buffers. */
	sc->bge_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->bge_dev.dv_xname);
		return;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
	    sizeof(struct bge_ring_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->bge_dev.dv_xname, (int)sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}
	DPRINTFN(5, ("bus_dmamem_create\n"));
	if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
	    sizeof(struct bge_ring_data), 0,
	    BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
		printf("%s: can't create dma map\n", sc->bge_dev.dv_xname);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}
	DPRINTFN(5, ("bus_dmamem_load\n"));
	if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
	    sizeof(struct bge_ring_data), NULL,
	    BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
		bus_dmamem_unmap(sc->bge_dmatag, kva,
		    sizeof(struct bge_ring_data));
		bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
		return;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->bge_rdata = (struct bge_ring_data *)kva;
	memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (bge_alloc_jumbo_mem(sc)) {
		printf("%s: jumbo buffer allocation failed\n",
		    sc->bge_dev.dv_xname);
	} else
		sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/* Set default tuneable values. */
	sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
	sc->bge_rx_coal_ticks = 150;
	sc->bge_tx_coal_ticks = 150;
	sc->bge_rx_max_coal_bds = 64;
	sc->bge_tx_max_coal_bds = 128;

	/* Set up ifnet structure */
	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bge_ioctl;
	ifp->if_start = bge_start;
	ifp->if_init = bge_init;
	ifp->if_watchdog = bge_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, BGE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("bcopy\n"));
	strcpy(ifp->if_xname, sc->bge_dev.dv_xname);

	sc->ethercom.ec_if.if_capabilities |=
	    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	sc->ethercom.ec_capabilities |=
	    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->bge_mii.mii_ifp = ifp;
	sc->bge_mii.mii_readreg = bge_miibus_readreg;
	sc->bge_mii.mii_writereg = bge_miibus_writereg;
	sc->bge_mii.mii_statchg = bge_miibus_statchg;

	/* Save ASIC rev. */

	sc->bge_asicrev =
	    pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL) &
	    BGE_PCIMISCCTL_ASICREV;

	/*
	 * Figure out what sort of media we have by checking the
	 * hardware config word in the EEPROM. Note: on some BCM5700
	 * cards, this value appears to be unset. If that's the
	 * case, we have to rely on identifying the NIC by its PCI
	 * subsystem ID, as we do below for the SysKonnect SK-9D41.
	 */
	bge_read_eeprom(sc, (caddr_t)&hwcfg,
	    BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
	if ((be32toh(hwcfg) & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
		sc->bge_tbi = 1;

	/* The SysKonnect SK-9D41 is a 1000baseSX card. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_SUBSYS) >> 16) ==
	    SK_SUBSYSID_9D41)
		sc->bge_tbi = 1;

	if (sc->bge_tbi) {
		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
	} else {
		/*
		 * Do transceiver setup.
		 */
		ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
		    bge_ifmedia_sts);
		mii_attach(&sc->bge_dev, &sc->bge_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->bge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->bge_dev.dv_xname);
			ifmedia_add(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->bge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp, eaddr);
	DPRINTFN(5, ("callout_init\n"));
	callout_init(&sc->bge_timeout);
}

void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);
}

void
bge_reset(sc)
	struct bge_softc *sc;
{
	struct pci_attach_args *pa = &sc->bge_pa;
	u_int32_t cachesize, command, pcistate;
	int i, val = 0;

	/* Save some important PCI state. */
	cachesize = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ);
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD);
	pcistate = pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE);

	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG,
	    BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1));

	DELAY(1000);

	/* Reset some of the PCI state that got zapped by reset */
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CMD, command);
	pci_conf_write(pa->pa_pc, pa->pa_tag, BGE_PCI_CACHESZ, cachesize);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);

	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < 750; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(1000);
	}

	if (i == 750) {
		printf("%s: firmware handshake timed out, val = %x\n",
		    sc->bge_dev.dv_xname, val);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion. If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_conf_read(pa->pa_pc, pa->pa_tag, BGE_PCI_PCISTATE) ==
		    pcistate)
			break;
		DELAY(10);
	}

	/* Enable memory arbiter. */
	CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	DELAY(10000);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle two possibilities here:
1955 * 1) the frame is from the jumbo receive ring
1956 * 2) the frame is from the standard receive ring
1957 */
1958
1959 void
1960 bge_rxeof(sc)
1961 struct bge_softc *sc;
1962 {
1963 struct ifnet *ifp;
1964 int stdcnt = 0, jumbocnt = 0;
1965 int have_tag = 0;
1966 u_int16_t vlan_tag = 0;
1967 bus_dmamap_t dmamap;
1968 bus_addr_t offset, toff;
1969 bus_size_t tlen;
1970 int tosync;
1971
1972 ifp = &sc->ethercom.ec_if;
1973
1974 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1975 offsetof(struct bge_ring_data, bge_status_block),
1976 sizeof (struct bge_status_block),
1977 BUS_DMASYNC_POSTREAD);
1978
1979 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
1980 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
1981 sc->bge_rx_saved_considx;
1982
1983 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
1984
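/*
 * If the producer index in the status block is behind our saved
 * consumer index, the return ring has wrapped: sync the tail of the
 * ring first, then (conservatively) the stretch from the start of
 * the ring.
 */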
1985 if (tosync < 0) {
1986 tlen = (BGE_RETURN_RING_CNT - sc->bge_rx_saved_considx) *
1987 sizeof (struct bge_rx_bd);
1988 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1989 toff, tlen, BUS_DMASYNC_POSTREAD);
1990 tosync = -tosync;
1991 }
1992
1993 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1994 offset, tosync * sizeof (struct bge_rx_bd),
1995 BUS_DMASYNC_POSTREAD);
1996
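/*
 * Walk the return ring from our saved consumer index up to the
 * producer index reported in the status block.  BGE_INC() advances
 * an index modulo the given ring size, so the indices wrap cleanly
 * at the end of each ring.
 */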
1997 while (sc->bge_rx_saved_considx !=
1998 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
1999 struct bge_rx_bd *cur_rx;
2000 u_int32_t rxidx;
2001 struct mbuf *m = NULL;
2002
2003 cur_rx = &sc->bge_rdata->
2004 bge_rx_return_ring[sc->bge_rx_saved_considx];
2005
2006 rxidx = cur_rx->bge_idx;
2007 BGE_INC(sc->bge_rx_saved_considx, BGE_RETURN_RING_CNT);
2008
2009 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
2010 have_tag = 1;
2011 vlan_tag = cur_rx->bge_vlan_tag;
2012 }
2013
2014 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
2015 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
2016 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
2017 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
2018 jumbocnt++;
2019 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2020 ifp->if_ierrors++;
2021 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2022 continue;
2023 }
2024 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
2025 NULL)== ENOBUFS) {
2026 ifp->if_ierrors++;
2027 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
2028 continue;
2029 }
2030 } else {
2031 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
2032 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
2033 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
2034 stdcnt++;
2035 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
2036 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
2037 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
2038 ifp->if_ierrors++;
2039 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2040 continue;
2041 }
2042 if (bge_newbuf_std(sc, sc->bge_std,
2043 NULL, dmamap) == ENOBUFS) {
2044 ifp->if_ierrors++;
2045 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
2046 continue;
2047 }
2048 }
2049
2050 ifp->if_ipackets++;
2051 m->m_pkthdr.len = m->m_len = cur_rx->bge_len;
2052 m->m_pkthdr.rcvif = ifp;
2053
2054 #if NBPFILTER > 0
2055 /*
2056 * Handle BPF listeners. Let the BPF user see the packet.
2057 */
2058 if (ifp->if_bpf)
2059 bpf_mtap(ifp->if_bpf, m);
2060 #endif
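/*
 * RX checksum offload is apparently unreliable on the BCM5700 B0
 * ASIC, so only trust the hardware checksums on other revisions.
 */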
2061
2062 if (sc->bge_asicrev != BGE_ASICREV_BCM5700_B0) {
2063 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2064 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
2065 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2066 #if 0 /* XXX appears to be broken */
2067 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) {
2068 m->m_pkthdr.csum_data =
2069 cur_rx->bge_tcp_udp_csum;
2070 m->m_pkthdr.csum_flags |=
2071 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_DATA);
2072 }
2073 #endif
2074 }
2075
2076 /*
2077 * If we received a packet with a vlan tag, pass it
2078 * to vlan_input() instead of ether_input().
2079 */
2080 if (have_tag) {
2081 struct mbuf *n;
2082
2083 n = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
2084 if (n != NULL) {
2085 *mtod(n, int *) = vlan_tag;
2086 n->m_len = sizeof(int);
2087 have_tag = vlan_tag = 0;
2088 } else {
2089 printf("%s: no mbuf for tag\n", ifp->if_xname);
2090 m_freem(m);
2091 have_tag = vlan_tag = 0;
2092 continue;
2093 }
2094 }
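/*
 * The tag now travels with the mbuf on its aux chain; a consumer
 * such as the vlan pseudo-interface would pull it back out roughly
 * like this (sketch):
 */
#ifdef notdef
		{
			struct mbuf *tagm;

			tagm = m_aux_find(m, AF_LINK, ETHERTYPE_VLAN);
			if (tagm != NULL)
				vlan_tag = *mtod(tagm, int *);
		}
#endif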
2095 (*ifp->if_input)(ifp, m);
2096 }
2097
2098 CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
2099 if (stdcnt)
2100 CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
2101 if (jumbocnt)
2102 CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
2103 }
2104
2105 void
2106 bge_txeof(sc)
2107 struct bge_softc *sc;
2108 {
2109 struct bge_tx_bd *cur_tx = NULL;
2110 struct ifnet *ifp;
2111 struct txdmamap_pool_entry *dma;
2112 bus_addr_t offset, toff;
2113 bus_size_t tlen;
2114 int tosync;
2115 struct mbuf *m;
2116
2117 ifp = &sc->ethercom.ec_if;
2118
2119 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2120 offsetof(struct bge_ring_data, bge_status_block),
2121 sizeof (struct bge_status_block),
2122 BUS_DMASYNC_POSTREAD);
2123
2124 offset = offsetof(struct bge_ring_data, bge_tx_ring);
2125 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
2126 sc->bge_tx_saved_considx;
2127
2128 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
2129
2130 if (tosync < 0) {
2131 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
2132 sizeof (struct bge_tx_bd);
2133 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2134 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2135 tosync = -tosync;
2136 }
2137
2138 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
2139 offset, tosync * sizeof (struct bge_tx_bd),
2140 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2141
2142 /*
2143 * Go through our tx ring and free mbufs for those
2144 * frames that have been sent.
2145 */
2146 while (sc->bge_tx_saved_considx !=
2147 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
2148 u_int32_t idx = 0;
2149
2150 idx = sc->bge_tx_saved_considx;
2151 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
2152 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
2153 ifp->if_opackets++;
2154 m = sc->bge_cdata.bge_tx_chain[idx];
2155 if (m != NULL) {
2156 sc->bge_cdata.bge_tx_chain[idx] = NULL;
2157 dma = sc->txdma[idx];
2158 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
2159 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2160 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
2161 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
2162 sc->txdma[idx] = NULL;
2163
2164 m_freem(m);
2165 }
2166 sc->bge_txcnt--;
2167 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
2168 ifp->if_timer = 0;
2169 }
2170
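/*
 * If we reaped at least one descriptor, the ring has room again:
 * clear OACTIVE so bge_start() can queue more frames.
 */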
2171 if (cur_tx != NULL)
2172 ifp->if_flags &= ~IFF_OACTIVE;
2173 }
2174
2175 int
2176 bge_intr(xsc)
2177 void *xsc;
2178 {
2179 struct bge_softc *sc;
2180 struct ifnet *ifp;
2181
2182 sc = xsc;
2183 ifp = &sc->ethercom.ec_if;
2184
2185 #ifdef notdef
2186 /* Avoid this for now -- checking this register is expensive. */
2187 /* Make sure this is really our interrupt. */
2188 if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE))
2189 return (0);
2190 #endif
2191 /*
 * Ack the interrupt and stop others from occurring: writing a
 * nonzero value to the IRQ0 mailbox masks further interrupts until
 * we write 0 back below.
 */
2192 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2193
2194 /*
2195 * Process link state changes.
2196 * Grrr. The link status word in the status block does
2197 * not work correctly on the BCM5700 rev AX and BX chips,
2198 * according to all available information. Hence, we have
2199 * to enable MII interrupts in order to properly obtain
2200 * async link changes. Unfortunately, this also means that
2201 * we have to read the MAC status register to detect link
2202 * changes, thereby adding an additional register access to
2203 * the interrupt handler.
2204 */
2205
2206 if (BGE_IS_5700_Ax_Bx(sc->bge_asicrev)) {
2207 u_int32_t status;
2208
2209 status = CSR_READ_4(sc, BGE_MAC_STS);
2210 if (status & BGE_MACSTAT_MI_INTERRUPT) {
2211 sc->bge_link = 0;
2212 callout_stop(&sc->bge_timeout);
2213 bge_tick(sc);
2214 /* Clear the interrupt */
2215 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
2216 BGE_EVTENB_MI_INTERRUPT);
2217 bge_miibus_readreg(&sc->bge_dev, 1, BRGPHY_MII_ISR);
2218 bge_miibus_writereg(&sc->bge_dev, 1, BRGPHY_MII_IMR,
2219 BRGPHY_INTRS);
2220 }
2221 } else {
2222 if (sc->bge_rdata->bge_status_block.bge_status &
2223 BGE_STATFLAG_LINKSTATE_CHANGED) {
2224 sc->bge_link = 0;
2225 callout_stop(&sc->bge_timeout);
2226 bge_tick(sc);
2227 /* Clear the interrupt */
2228 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
2229 BGE_MACSTAT_CFG_CHANGED);
2230 }
2231 }
2232
2233 if (ifp->if_flags & IFF_RUNNING) {
2234 /* Check RX return ring producer/consumer */
2235 bge_rxeof(sc);
2236
2237 /* Check TX ring producer/consumer */
2238 bge_txeof(sc);
2239 }
2240
2241 bge_handle_events(sc);
2242
2243 /* Re-enable interrupts. */
2244 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2245
2246 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
2247 bge_start(ifp);
2248
2249 return (1);
2250 }
2251
2252 void
2253 bge_tick(xsc)
2254 void *xsc;
2255 {
2256 struct bge_softc *sc = xsc;
2257 struct mii_data *mii = &sc->bge_mii;
2258 struct ifmedia *ifm = NULL;
2259 struct ifnet *ifp = &sc->ethercom.ec_if;
2260 int s;
2261
2262 s = splnet();
2263
2264 bge_stats_update(sc);
2265 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2266 if (sc->bge_link) {
2267 splx(s);
2268 return;
2269 }
2270
2271 if (sc->bge_tbi) {
2272 ifm = &sc->bge_ifmedia;
2273 if (CSR_READ_4(sc, BGE_MAC_STS) &
2274 BGE_MACSTAT_TBI_PCS_SYNCHED) {
2275 sc->bge_link++;
2276 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
2277 printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
2278 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2279 bge_start(ifp);
2280 }
2281 splx(s);
2282 return;
2283 }
2284
2285 mii_tick(mii);
2286
2287 if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE &&
2288 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2289 sc->bge_link++;
2290 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
2291 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
2292 printf("%s: gigabit link up\n", sc->bge_dev.dv_xname);
2293 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2294 bge_start(ifp);
2295 }
2296
2297 splx(s);
2298 }
2299
2300 void
2301 bge_stats_update(sc)
2302 struct bge_softc *sc;
2303 {
2304 struct ifnet *ifp = &sc->ethercom.ec_if;
2305 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
2306
2307 #define READ_STAT(sc, stats, stat) \
2308 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
2309
2310 ifp->if_collisions +=
2311 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
2312 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
2313 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
2314 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
2315 ifp->if_collisions;
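/*
 * Note that "x += total - x" is just "x = total": the chip's
 * collision counters are cumulative, so this keeps if_collisions
 * in step with the hardware totals.
 */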
2316
2317 #undef READ_STAT
2318
2319 #ifdef notdef
2320 ifp->if_collisions +=
2321 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
2322 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
2323 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
2324 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
2325 ifp->if_collisions;
2326 #endif
2327 }
2328
2329 /*
2330 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
2331 * pointers to descriptors.
2332 */
2333 int
2334 bge_encap(sc, m_head, txidx)
2335 struct bge_softc *sc;
2336 struct mbuf *m_head;
2337 u_int32_t *txidx;
2338 {
2339 struct bge_tx_bd *f = NULL;
2340 u_int32_t frag, cur, cnt = 0;
2341 u_int16_t csum_flags = 0;
2342 struct txdmamap_pool_entry *dma;
2343 bus_dmamap_t dmamap;
2344 int i = 0;
2345 struct mbuf *n;
2346
2347 cur = frag = *txidx;
2348
2349 if (m_head->m_pkthdr.csum_flags) {
2350 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
2351 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
2352 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
2353 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
2354 }
2355
2356 dma = SLIST_FIRST(&sc->txdma_list);
2357 if (dma == NULL)
2358 return ENOBUFS;
2359 dmamap = dma->dmamap;
2360
2361 /*
2362 * Start packing the mbufs in this chain into
2363 * the fragment pointers. Stop when we run out
2364 * of fragments or hit the end of the mbuf chain.
2365 */
2366 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
2367 BUS_DMA_NOWAIT))
2368 return(ENOBUFS);
2369
2370 n = sc->ethercom.ec_nvlans ?
2371 m_aux_find(m_head, AF_LINK, ETHERTYPE_VLAN) : NULL;
2372
2373 for (i = 0; i < dmamap->dm_nsegs; i++) {
2374 f = &sc->bge_rdata->bge_tx_ring[frag];
2375 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
2376 break;
2377 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
2378 f->bge_len = dmamap->dm_segs[i].ds_len;
2379 f->bge_flags = csum_flags;
2380
2381 if (n != NULL) {
2382 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
2383 f->bge_vlan_tag = *mtod(n, int *);
2384 } else {
2385 f->bge_vlan_tag = 0;
2386 }
2387 /*
2388 * Sanity check: avoid coming within 16 descriptors
2389 * of the end of the ring.
2390 */
2391 if ((BGE_TX_RING_CNT - (sc->bge_txcnt + cnt)) < 16) {
/* Unload the map loaded above so it isn't leaked. */
bus_dmamap_unload(sc->bge_dmatag, dmamap);
2392 return(ENOBUFS);
}
2393 cur = frag;
2394 BGE_INC(frag, BGE_TX_RING_CNT);
2395 cnt++;
2396 }
2397
2398 if (i < dmamap->dm_nsegs) {
bus_dmamap_unload(sc->bge_dmatag, dmamap);
2399 return ENOBUFS;
}
2400
2401 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
2402 BUS_DMASYNC_PREWRITE);
2403
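/*
 * 'frag' is now the first free slot beyond the frame and becomes
 * the new producer index; 'cur' names the last descriptor actually
 * filled, which is the one that gets the END flag below.
 */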
2404 if (frag == sc->bge_tx_saved_considx) {
bus_dmamap_unload(sc->bge_dmatag, dmamap);
2405 return(ENOBUFS);
}
2406
2407 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
2408 sc->bge_cdata.bge_tx_chain[cur] = m_head;
2409 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
2410 sc->txdma[cur] = dma;
2411 sc->bge_txcnt += cnt;
2412
2413 *txidx = frag;
2414
2415 return(0);
2416 }
2417
2418 /*
2419 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2420 * to the mbuf data regions directly in the transmit descriptors.
2421 */
2422 void
2423 bge_start(ifp)
2424 struct ifnet *ifp;
2425 {
2426 struct bge_softc *sc;
2427 struct mbuf *m_head = NULL;
2428 u_int32_t prodidx = 0;
2429 int pkts = 0;
2430
2431 sc = ifp->if_softc;
2432
/*
 * While the link is down, hold off on transmitting unless the send
 * queue has begun to back up.
 */
2433 if (!sc->bge_link && ifp->if_snd.ifq_len < 10)
2434 return;
2435
2436 prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO);
2437
2438 while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
2439 IFQ_POLL(&ifp->if_snd, m_head);
2440 if (m_head == NULL)
2441 break;
2442
2443 #if 0
2444 /*
2445 * XXX
2446 * safety overkill. If this is a fragmented packet chain
2447 * with delayed TCP/UDP checksums, then only encapsulate
2448 * it if we have enough descriptors to handle the entire
2449 * chain at once.
2450 * (paranoia -- may not actually be needed)
2451 */
2452 if (m_head->m_flags & M_FIRSTFRAG &&
2453 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
2454 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
2455 m_head->m_pkthdr.csum_data + 16) {
2456 ifp->if_flags |= IFF_OACTIVE;
2457 break;
2458 }
2459 }
2460 #endif
2461
2462 /*
2463 * Pack the data into the transmit ring. If we
2464 * don't have room, set the OACTIVE flag and wait
2465 * for the NIC to drain the ring.
2466 */
2467 if (bge_encap(sc, m_head, &prodidx)) {
2468 ifp->if_flags |= IFF_OACTIVE;
2469 break;
2470 }
2471
2472 /* now we are committed to transmit the packet */
2473 IFQ_DEQUEUE(&ifp->if_snd, m_head);
2474 pkts++;
2475
2476 #if NBPFILTER > 0
2477 /*
2478 * If there's a BPF listener, bounce a copy of this frame
2479 * to him.
2480 */
2481 if (ifp->if_bpf)
2482 bpf_mtap(ifp->if_bpf, m_head);
2483 #endif
2484 }
2485 if (pkts == 0)
2486 return;
2487
2488 /* Transmit */
2489 CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
2490
2491 /*
2492 * Set a timeout in case the chip goes out to lunch.
2493 */
2494 ifp->if_timer = 5;
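/*
 * bge_txeof() zeroes if_timer again as completions arrive; if five
 * seconds pass with no progress, bge_watchdog() fires and
 * reinitializes the chip.
 */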
2495 }
2496
2497 /*
2498 * If we have a BCM5400 or BCM5401 PHY, we need to properly
2499 * program its internal DSP. Failing to do this can result in
2500 * massive packet loss at 1Gb speeds.
2501 */
2502 void
2503 bge_phy_hack(sc)
2504 struct bge_softc *sc;
2505 {
2506 struct bge_bcom_hack bhack[] = {
2507 { BRGPHY_MII_AUXCTL, 0x4C20 },
2508 { BRGPHY_MII_DSP_ADDR_REG, 0x0012 },
2509 { BRGPHY_MII_DSP_RW_PORT, 0x1804 },
2510 { BRGPHY_MII_DSP_ADDR_REG, 0x0013 },
2511 { BRGPHY_MII_DSP_RW_PORT, 0x1204 },
2512 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 },
2513 { BRGPHY_MII_DSP_RW_PORT, 0x0132 },
2514 { BRGPHY_MII_DSP_ADDR_REG, 0x8006 },
2515 { BRGPHY_MII_DSP_RW_PORT, 0x0232 },
2516 { BRGPHY_MII_DSP_ADDR_REG, 0x201F },
2517 { BRGPHY_MII_DSP_RW_PORT, 0x0A20 },
2518 { 0, 0 } };
2519 u_int16_t vid, did;
2520 int i;
2521
2522 vid = bge_miibus_readreg(&sc->bge_dev, 1, MII_PHYIDR1);
2523 did = bge_miibus_readreg(&sc->bge_dev, 1, MII_PHYIDR2);
2524
2525 if (MII_OUI(vid, did) == MII_OUI_BROADCOM &&
2526 (MII_MODEL(did) == MII_MODEL_BROADCOM_BCM5400 ||
2527 MII_MODEL(did) == MII_MODEL_BROADCOM_BCM5401)) {
2528 i = 0;
2529 while (bhack[i].reg) {
2530 bge_miibus_writereg(&sc->bge_dev, 1, bhack[i].reg,
2531 bhack[i].val);
2532 i++;
2533 }
2534 }
2535 }
2536
2537 int
2538 bge_init(ifp)
2539 struct ifnet *ifp;
2540 {
2541 struct bge_softc *sc = ifp->if_softc;
2542 u_int16_t *m;
2543 int s, error;
2544
2545 s = splnet();
2546
2547 ifp = &sc->ethercom.ec_if;
2548
2549 /* Cancel pending I/O and flush buffers. */
2550 bge_stop(sc);
2551 bge_reset(sc);
2552 bge_chipinit(sc);
2553
2554 /*
2555 * Init the various state machines, ring
2556 * control blocks and firmware.
2557 */
2558 error = bge_blockinit(sc);
2559 if (error != 0) {
2560 printf("%s: initialization error %d\n", sc->bge_dev.dv_xname,
2561 error);
2562 splx(s);
2563 return error;
2564 }
2565
2568 /*
 * Specify MTU.  The chip's RX MTU is the full frame size, so
 * include the Ethernet header and CRC.
 */
2569 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
2570 ETHER_HDR_LEN + ETHER_CRC_LEN);
2571
2572 /*
 * Load our MAC address: the station address is programmed as three
 * 16-bit words in network byte order, split across two registers.
 */
2573 m = (u_int16_t *)&(LLADDR(ifp->if_sadl)[0]);
2574 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
2575 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
2576
2577 /* Enable or disable promiscuous mode as needed. */
2578 if (ifp->if_flags & IFF_PROMISC) {
2579 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2580 } else {
2581 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
2582 }
2583
2584 /* Program multicast filter. */
2585 bge_setmulti(sc);
2586
2587 /* Init RX ring. */
2588 bge_init_rx_ring_std(sc);
2589
2590 /* Init jumbo RX ring. */
2591 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2592 bge_init_rx_ring_jumbo(sc);
2593
2594 /* Init our RX return ring index */
2595 sc->bge_rx_saved_considx = 0;
2596
2597 /* Init TX ring. */
2598 bge_init_tx_ring(sc);
2599
2600 /* Turn on transmitter */
2601 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
2602
2603 /* Turn on receiver */
2604 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2605
2606 /* Tell firmware we're alive. */
2607 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2608
2609 /* Enable host interrupts. */
2610 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
2611 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2612 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0);
2613
2614 bge_ifmedia_upd(ifp);
2615
2616 ifp->if_flags |= IFF_RUNNING;
2617 ifp->if_flags &= ~IFF_OACTIVE;
2618
2619 splx(s);
2620
2621 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
2622
2623 return 0;
2624 }
2625
2626 /*
2627 * Set media options.
2628 */
2629 int
2630 bge_ifmedia_upd(ifp)
2631 struct ifnet *ifp;
2632 {
2633 struct bge_softc *sc = ifp->if_softc;
2634 struct mii_data *mii = &sc->bge_mii;
2635 struct ifmedia *ifm = &sc->bge_ifmedia;
2636
2637 /* If this is a 1000baseX NIC, enable the TBI port. */
2638 if (sc->bge_tbi) {
2639 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2640 return(EINVAL);
2641 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2642 case IFM_AUTO:
2643 break;
2644 case IFM_1000_SX:
2645 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
2646 BGE_CLRBIT(sc, BGE_MAC_MODE,
2647 BGE_MACMODE_HALF_DUPLEX);
2648 } else {
2649 BGE_SETBIT(sc, BGE_MAC_MODE,
2650 BGE_MACMODE_HALF_DUPLEX);
2651 }
2652 break;
2653 default:
2654 return(EINVAL);
2655 }
2656 return(0);
2657 }
2658
2659 sc->bge_link = 0;
2660 if (mii->mii_instance) {
2661 struct mii_softc *miisc;
2662 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
2663 miisc = LIST_NEXT(miisc, mii_list))
2664 mii_phy_reset(miisc);
2665 }
2666 bge_phy_hack(sc);
2667 mii_mediachg(mii);
2668
2669 return(0);
2670 }
2671
2672 /*
2673 * Report current media status.
2674 */
2675 void
2676 bge_ifmedia_sts(ifp, ifmr)
2677 struct ifnet *ifp;
2678 struct ifmediareq *ifmr;
2679 {
2680 struct bge_softc *sc = ifp->if_softc;
2681 struct mii_data *mii = &sc->bge_mii;
2682
2683 if (sc->bge_tbi) {
2684 ifmr->ifm_status = IFM_AVALID;
2685 ifmr->ifm_active = IFM_ETHER;
2686 if (CSR_READ_4(sc, BGE_MAC_STS) &
2687 BGE_MACSTAT_TBI_PCS_SYNCHED)
2688 ifmr->ifm_status |= IFM_ACTIVE;
2689 ifmr->ifm_active |= IFM_1000_SX;
2690 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
2691 ifmr->ifm_active |= IFM_HDX;
2692 else
2693 ifmr->ifm_active |= IFM_FDX;
2694 return;
2695 }
2696
2697 mii_pollstat(mii);
2698 ifmr->ifm_active = mii->mii_media_active;
2699 ifmr->ifm_status = mii->mii_media_status;
2700 }
2701
2702 int
2703 bge_ioctl(ifp, command, data)
2704 struct ifnet *ifp;
2705 u_long command;
2706 caddr_t data;
2707 {
2708 struct bge_softc *sc = ifp->if_softc;
2709 struct ifreq *ifr = (struct ifreq *) data;
2710 int s, error = 0;
2711 struct mii_data *mii;
2712
2713 s = splnet();
2714
2715 switch(command) {
2716 case SIOCSIFFLAGS:
2717 if (ifp->if_flags & IFF_UP) {
2718 /*
2719 * If only the state of the PROMISC flag changed,
2720 * then just use the 'set promisc mode' command
2721 * instead of reinitializing the entire NIC. Doing
2722 * a full re-init means reloading the firmware and
2723 * waiting for it to start up, which may take a
2724 * second or two.
2725 */
2726 if (ifp->if_flags & IFF_RUNNING &&
2727 ifp->if_flags & IFF_PROMISC &&
2728 !(sc->bge_if_flags & IFF_PROMISC)) {
2729 BGE_SETBIT(sc, BGE_RX_MODE,
2730 BGE_RXMODE_RX_PROMISC);
2731 } else if (ifp->if_flags & IFF_RUNNING &&
2732 !(ifp->if_flags & IFF_PROMISC) &&
2733 sc->bge_if_flags & IFF_PROMISC) {
2734 BGE_CLRBIT(sc, BGE_RX_MODE,
2735 BGE_RXMODE_RX_PROMISC);
2736 } else
2737 bge_init(ifp);
2738 } else {
2739 if (ifp->if_flags & IFF_RUNNING) {
2740 bge_stop(sc);
2741 }
2742 }
2743 sc->bge_if_flags = ifp->if_flags;
2744 error = 0;
2745 break;
2746 case SIOCSIFMEDIA:
2747 case SIOCGIFMEDIA:
2748 if (sc->bge_tbi) {
2749 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
2750 command);
2751 } else {
2752 mii = &sc->bge_mii;
2753 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
2754 command);
2755 }
2757 break;
2758 default:
2759 error = ether_ioctl(ifp, command, data);
2760 if (error == ENETRESET) {
2761 bge_setmulti(sc);
2762 error = 0;
2763 }
2764 break;
2765 }
2766
2767 splx(s);
2768
2769 return(error);
2770 }
2771
2772 void
2773 bge_watchdog(ifp)
2774 struct ifnet *ifp;
2775 {
2776 struct bge_softc *sc;
2777
2778 sc = ifp->if_softc;
2779
2780 printf("%s: watchdog timeout -- resetting\n", sc->bge_dev.dv_xname);
2781
2782 ifp->if_flags &= ~IFF_RUNNING;
2783 bge_init(ifp);
2784
2785 ifp->if_oerrors++;
2786 }
2787
2788 /*
2789 * Stop the adapter and free any mbufs allocated to the
2790 * RX and TX lists.
2791 */
2792 void
2793 bge_stop(sc)
2794 struct bge_softc *sc;
2795 {
2796 struct ifnet *ifp = &sc->ethercom.ec_if;
2797
2798 callout_stop(&sc->bge_timeout);
2799
2800 /*
2801 * Disable all of the receiver blocks
2802 */
2803 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
2804 BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
2805 BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
2806 BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
2807 BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
2808 BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
2809 BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
2810
2811 /*
2812 * Disable all of the transmit blocks
2813 */
2814 BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
2815 BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
2816 BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
2817 BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
2818 BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
2819 BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
2820 BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
2821
2822 /*
2823 * Shut down all of the memory managers and related
2824 * state machines.
2825 */
2826 BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
2827 BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
2828 BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
2829 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
2830 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
2831 BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
2832 BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
2833
2834 /* Disable host interrupts. */
2835 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
2836 CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1);
2837
2838 /*
2839 * Tell firmware we're shutting down.
2840 */
2841 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
2842
2843 /* Free the RX lists. */
2844 bge_free_rx_ring_std(sc);
2845
2846 /* Free jumbo RX list. */
2847 bge_free_rx_ring_jumbo(sc);
2848
2849 /* Free TX buffers. */
2850 bge_free_tx_ring(sc);
2851
2852 /*
2853 * Isolate/power down the PHY.
2854 */
2855 if (!sc->bge_tbi)
2856 mii_down(&sc->bge_mii);
2857
2858 sc->bge_link = 0;
2859
2860 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
2861
2862 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2863 }
2864
2865 /*
2866 * Stop all chip I/O so that the kernel's probe routines don't
2867 * get confused by errant DMAs when rebooting.
2868 */
2869 void
2870 bge_shutdown(xsc)
2871 void *xsc;
2872 {
2873 struct bge_softc *sc = (struct bge_softc *)xsc;
2874
2875 bge_stop(sc);
2876 bge_reset(sc);
2877 }
2878