1 /*	$NetBSD: if_bge.c,v 1.151 2008/08/25 08:15:05 cegger Exp $	*/
2
3 /*
4 * Copyright (c) 2001 Wind River Systems
5 * Copyright (c) 1997, 1998, 1999, 2001
6 * Bill Paul <wpaul (at) windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: if_bge.c,v 1.13 2002/04/04 06:01:31 wpaul Exp $
36 */
37
38 /*
39 * Broadcom BCM570x family gigabit ethernet driver for NetBSD.
40 *
41 * NetBSD version by:
42 *
43 * Frank van der Linden <fvdl (at) wasabisystems.com>
44 * Jason Thorpe <thorpej (at) wasabisystems.com>
45 * Jonathan Stone <jonathan (at) dsg.stanford.edu>
46 *
47 * Originally written for FreeBSD by Bill Paul <wpaul (at) windriver.com>
48 * Senior Engineer, Wind River Systems
49 */
50
51 /*
52 * The Broadcom BCM5700 is based on technology originally developed by
53 * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet
54  * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has
55 * two on-board MIPS R4000 CPUs and can have as much as 16MB of external
56 * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo
57 * frames, highly configurable RX filtering, and 16 RX and TX queues
58 * (which, along with RX filter rules, can be used for QOS applications).
59 * Other features, such as TCP segmentation, may be available as part
60 * of value-added firmware updates. Unlike the Tigon I and Tigon II,
61 * firmware images can be stored in hardware and need not be compiled
62 * into the driver.
63 *
64 * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will
65  * function in either a 32-bit/64-bit 33/66MHz bus or a 64-bit/133MHz bus.
66 *
67 * The BCM5701 is a single-chip solution incorporating both the BCM5700
68 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701
69 * does not support external SSRAM.
70 *
71 * Broadcom also produces a variation of the BCM5700 under the "Altima"
72 * brand name, which is functionally similar but lacks PCI-X support.
73 *
74 * Without external SSRAM, you can only have at most 4 TX rings,
75 * and the use of the mini RX ring is disabled. This seems to imply
76 * that these features are simply not available on the BCM5701. As a
77 * result, this driver does not implement any support for the mini RX
78 * ring.
79 */
80
81 #include <sys/cdefs.h>
82 __KERNEL_RCSID(0, "$NetBSD: if_bge.c,v 1.151 2008/08/25 08:15:05 cegger Exp $");
83
84 #include "bpfilter.h"
85 #include "vlan.h"
86 #include "rnd.h"
87
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/callout.h>
91 #include <sys/sockio.h>
92 #include <sys/mbuf.h>
93 #include <sys/malloc.h>
94 #include <sys/kernel.h>
95 #include <sys/device.h>
96 #include <sys/socket.h>
97 #include <sys/sysctl.h>
98
99 #include <net/if.h>
100 #include <net/if_dl.h>
101 #include <net/if_media.h>
102 #include <net/if_ether.h>
103
104 #if NRND > 0
105 #include <sys/rnd.h>
106 #endif
107
108 #ifdef INET
109 #include <netinet/in.h>
110 #include <netinet/in_systm.h>
111 #include <netinet/in_var.h>
112 #include <netinet/ip.h>
113 #endif
114
115 /* Headers for TCP Segmentation Offload (TSO) */
116 #include <netinet/in_systm.h> /* n_time for <netinet/ip.h>... */
117 #include <netinet/in.h> /* ip_{src,dst}, for <netinet/ip.h> */
118 #include <netinet/ip.h> /* for struct ip */
119 #include <netinet/tcp.h> /* for struct tcphdr */
120
121
122 #if NBPFILTER > 0
123 #include <net/bpf.h>
124 #endif
125
126 #include <dev/pci/pcireg.h>
127 #include <dev/pci/pcivar.h>
128 #include <dev/pci/pcidevs.h>
129
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 #include <dev/mii/miidevs.h>
133 #include <dev/mii/brgphyreg.h>
134
135 #include <dev/pci/if_bgereg.h>
136
137 #include <uvm/uvm_extern.h>
138
139 #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */
140
141
142 /*
143 * Tunable thresholds for rx-side bge interrupt mitigation.
144 */
145
146 /*
147 * The pairs of values below were obtained from empirical measurement
148  * on bcm5700 rev B2; they are designed to give roughly 1 receive
149 * interrupt for every N packets received, where N is, approximately,
150 * the second value (rx_max_bds) in each pair. The values are chosen
151 * such that moving from one pair to the succeeding pair was observed
152 * to roughly halve interrupt rate under sustained input packet load.
153 * The values were empirically chosen to avoid overflowing internal
154  * limits on the bcm5700: increasing rx_ticks much beyond 600
155 * results in internal wrapping and higher interrupt rates.
156 * The limit of 46 frames was chosen to match NFS workloads.
157 *
158 * These values also work well on bcm5701, bcm5704C, and (less
159 * tested) bcm5703. On other chipsets, (including the Altima chip
160 * family), the larger values may overflow internal chip limits,
161 * leading to increasing interrupt rates rather than lower interrupt
162 * rates.
163 *
164 * Applications using heavy interrupt mitigation (interrupting every
165 * 32 or 46 frames) in both directions may need to increase the TCP
166  * window size to above 131072 bytes (e.g., to 199608 bytes) to sustain
167 * full link bandwidth, due to ACKs and window updates lingering
168 * in the RX queue during the 30-to-40-frame interrupt-mitigation window.
169 */
170 static const struct bge_load_rx_thresh {
171 int rx_ticks;
172 int rx_max_bds; }
173 bge_rx_threshes[] = {
174 { 32, 2 },
175 { 50, 4 },
176 { 100, 8 },
177 { 192, 16 },
178 { 416, 32 },
179 { 598, 46 }
180 };
181 #define NBGE_RX_THRESH (sizeof(bge_rx_threshes) / sizeof(bge_rx_threshes[0]))
182
183 /* XXX patchable; should be sysctl'able */
184 static int bge_auto_thresh = 1;
185 static int bge_rx_thresh_lvl;
186
187 static int bge_rxthresh_nodenum;
188
189 typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, u_int8_t[]);
190
191 static int bge_probe(device_t, cfdata_t, void *);
192 static void bge_attach(device_t, device_t, void *);
193 static void bge_release_resources(struct bge_softc *);
194 static void bge_txeof(struct bge_softc *);
195 static void bge_rxeof(struct bge_softc *);
196
197 static int bge_get_eaddr_mem(struct bge_softc *, u_int8_t[]);
198 static int bge_get_eaddr_nvram(struct bge_softc *, u_int8_t[]);
199 static int bge_get_eaddr_eeprom(struct bge_softc *, u_int8_t[]);
200 static int bge_get_eaddr(struct bge_softc *, u_int8_t[]);
201
202 static void bge_tick(void *);
203 static void bge_stats_update(struct bge_softc *);
204 static int bge_encap(struct bge_softc *, struct mbuf *, u_int32_t *);
205
206 static int bge_intr(void *);
207 static void bge_start(struct ifnet *);
208 static int bge_ioctl(struct ifnet *, u_long, void *);
209 static int bge_init(struct ifnet *);
210 static void bge_stop(struct ifnet *, int);
211 static void bge_watchdog(struct ifnet *);
212 static int bge_ifmedia_upd(struct ifnet *);
213 static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
214
215 static void bge_setmulti(struct bge_softc *);
216
217 static void bge_handle_events(struct bge_softc *);
218 static int bge_alloc_jumbo_mem(struct bge_softc *);
219 #if 0 /* XXX */
220 static void bge_free_jumbo_mem(struct bge_softc *);
221 #endif
222 static void *bge_jalloc(struct bge_softc *);
223 static void bge_jfree(struct mbuf *, void *, size_t, void *);
224 static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *,
225 bus_dmamap_t);
226 static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *);
227 static int bge_init_rx_ring_std(struct bge_softc *);
228 static void bge_free_rx_ring_std(struct bge_softc *);
229 static int bge_init_rx_ring_jumbo(struct bge_softc *);
230 static void bge_free_rx_ring_jumbo(struct bge_softc *);
231 static void bge_free_tx_ring(struct bge_softc *);
232 static int bge_init_tx_ring(struct bge_softc *);
233
234 static int bge_chipinit(struct bge_softc *);
235 static int bge_blockinit(struct bge_softc *);
236 static int bge_setpowerstate(struct bge_softc *, int);
237
238 static void bge_reset(struct bge_softc *);
239
240 #define BGE_DEBUG
241 #ifdef BGE_DEBUG
242 #define DPRINTF(x)	do { if (bgedebug) printf x; } while (0)
243 #define DPRINTFN(n,x)	do { if (bgedebug >= (n)) printf x; } while (0)
244 #define BGE_TSO_PRINTF(x) do { if (bge_tso_debug) printf x ;} while (0)
245 int bgedebug = 0;
246 int bge_tso_debug = 0;
247 #else
248 #define DPRINTF(x)
249 #define DPRINTFN(n,x)
250 #define BGE_TSO_PRINTF(x)
251 #endif
252
253 #ifdef BGE_EVENT_COUNTERS
254 #define BGE_EVCNT_INCR(ev) (ev).ev_count++
255 #define BGE_EVCNT_ADD(ev, val) (ev).ev_count += (val)
256 #define BGE_EVCNT_UPD(ev, val) (ev).ev_count = (val)
257 #else
258 #define BGE_EVCNT_INCR(ev) /* nothing */
259 #define BGE_EVCNT_ADD(ev, val) /* nothing */
260 #define BGE_EVCNT_UPD(ev, val) /* nothing */
261 #endif
262
263 /* Various chip quirks. */
264 #define BGE_QUIRK_LINK_STATE_BROKEN 0x00000001
265 #define BGE_QUIRK_CSUM_BROKEN 0x00000002
266 #define BGE_QUIRK_ONLY_PHY_1 0x00000004
267 #define BGE_QUIRK_5700_SMALLDMA 0x00000008
268 #define BGE_QUIRK_5700_PCIX_REG_BUG 0x00000010
269 #define BGE_QUIRK_PRODUCER_BUG 0x00000020
270 #define BGE_QUIRK_PCIX_DMA_ALIGN_BUG 0x00000040
271 #define BGE_QUIRK_5705_CORE 0x00000080
272 #define BGE_QUIRK_FEWER_MBUFS 0x00000100
273
274 /*
275 * XXX: how to handle variants based on 5750 and derivatives:
276  * 5750, 5751, 5721, possibly 5714, 5752, and 5708(?), which
277 * in general behave like a 5705, except with additional quirks.
278 * This driver's current handling of the 5721 is wrong;
279 * how we map ASIC revision to "quirks" needs more thought.
280 * (defined here until the thought is done).
281 */
282 #define BGE_IS_5714_FAMILY(sc) \
283 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714_A0 || \
284 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780 || \
285 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5714 )
286
287 #define BGE_IS_5750_OR_BEYOND(sc) \
288 (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 || \
289 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 || \
290 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 || \
291 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 || \
292 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 || \
293 BGE_IS_5714_FAMILY(sc) )
294
295 #define BGE_IS_5705_OR_BEYOND(sc) \
296 ( ((sc)->bge_quirks & BGE_QUIRK_5705_CORE) || \
297 BGE_IS_5750_OR_BEYOND(sc) )
298
299
300 /* following bugs are common to bcm5700 rev B, all flavours */
301 #define BGE_QUIRK_5700_COMMON \
302 (BGE_QUIRK_5700_SMALLDMA|BGE_QUIRK_PRODUCER_BUG)
303
304 CFATTACH_DECL_NEW(bge, sizeof(struct bge_softc),
305 bge_probe, bge_attach, NULL, NULL);
306
307 static u_int32_t
308 bge_readmem_ind(struct bge_softc *sc, int off)
309 {
310 pcireg_t val;
311
312 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
313 val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA);
314 return val;
315 }
316
317 static void
318 bge_writemem_ind(struct bge_softc *sc, int off, int val)
319 {
320 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, off);
321 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_DATA, val);
322 }
323
324 #ifdef notdef
325 static u_int32_t
326 bge_readreg_ind(struct bge_softc *sc, int off)
327 {
328 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
329 return(pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA));
330 }
331 #endif
332
333 static void
334 bge_writereg_ind(struct bge_softc *sc, int off, int val)
335 {
336 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_BASEADDR, off);
337 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_REG_DATA, val);
338 }
339
340 static void
341 bge_writemem_direct(struct bge_softc *sc, int off, int val)
342 {
343 CSR_WRITE_4(sc, off, val);
344 }
345
346 static void
347 bge_writembx(struct bge_softc *sc, int off, int val)
348 {
349 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
350 off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI;
351
352 CSR_WRITE_4(sc, off, val);
353 }
354
355 #ifdef notdef
356 static u_int8_t
357 bge_vpd_readbyte(struct bge_softc *sc, int addr)
358 {
359 int i;
360 u_int32_t val;
361
362 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR, addr);
363 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
364 DELAY(10);
365 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_ADDR) &
366 BGE_VPD_FLAG)
367 break;
368 }
369
370 	if (i == BGE_TIMEOUT * 10) {
371 aprint_error_dev(sc->bge_dev, "VPD read timed out\n");
372 return(0);
373 }
374
375 	val = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_VPD_DATA);
376
377 return((val >> ((addr % 4) * 8)) & 0xFF);
378 }
379
380 static void
381 bge_vpd_read_res(struct bge_softc *sc, struct vpd_res *res, int addr)
382 {
383 int i;
384 u_int8_t *ptr;
385
386 ptr = (u_int8_t *)res;
387 for (i = 0; i < sizeof(struct vpd_res); i++)
388 ptr[i] = bge_vpd_readbyte(sc, i + addr);
389 }
390
391 static void
392 bge_vpd_read(struct bge_softc *sc)
393 {
394 int pos = 0, i;
395 struct vpd_res res;
396
397 if (sc->bge_vpd_prodname != NULL)
398 free(sc->bge_vpd_prodname, M_DEVBUF);
399 if (sc->bge_vpd_readonly != NULL)
400 free(sc->bge_vpd_readonly, M_DEVBUF);
401 sc->bge_vpd_prodname = NULL;
402 sc->bge_vpd_readonly = NULL;
403
404 bge_vpd_read_res(sc, &res, pos);
405
406 if (res.vr_id != VPD_RES_ID) {
407 		aprint_error_dev(sc->bge_dev, "bad VPD resource id: "
408 		    "expected %x got %x\n", VPD_RES_ID, res.vr_id);
409 return;
410 }
411
412 pos += sizeof(res);
413 sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
414 if (sc->bge_vpd_prodname == NULL)
415 panic("bge_vpd_read");
416 for (i = 0; i < res.vr_len; i++)
417 sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos);
418 sc->bge_vpd_prodname[i] = '\0';
419 pos += i;
420
421 bge_vpd_read_res(sc, &res, pos);
422
423 if (res.vr_id != VPD_RES_READ) {
424 aprint_error_dev(sc->bge_dev,
425 "bad VPD resource id: expected %x got %x\n",
426 VPD_RES_READ, res.vr_id);
427 return;
428 }
429
430 pos += sizeof(res);
431 sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
432 if (sc->bge_vpd_readonly == NULL)
433 panic("bge_vpd_read");
434 	for (i = 0; i < res.vr_len; i++)
435 sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos);
436 }
437 #endif
438
439 static u_int8_t
440 bge_nvram_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
441 {
442 u_int32_t access, byte = 0;
443 int i;
444
445 /* Lock. */
446 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1);
447 for (i = 0; i < 8000; i++) {
448 if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1)
449 break;
450 DELAY(20);
451 }
452 if (i == 8000)
453 return (1);
454
455 /* Enable access. */
456 access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
457 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);
458
459 CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
460 CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
461 for (i = 0; i < BGE_TIMEOUT * 10; i++) {
462 DELAY(10);
463 if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
464 DELAY(10);
465 break;
466 }
467 }
468
469 if (i == BGE_TIMEOUT * 10) {
470 aprint_error_dev(sc->bge_dev, "nvram read timed out\n");
471 return (1);
472 }
473
474 /* Get result. */
475 byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);
476
477 *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;
478
479 /* Disable access. */
480 CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);
481
482 /* Unlock. */
483 CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
484 CSR_READ_4(sc, BGE_NVRAM_SWARB);
485
486 return (0);
487 }
488
489 /*
490 * Read a sequence of bytes from NVRAM.
491 */
492 static int
493 bge_read_nvram(struct bge_softc *sc, u_int8_t *dest, int off, int cnt)
494 {
495 int err = 0, i;
496 u_int8_t byte = 0;
497
498 if (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)
499 return (1);
500
501 for (i = 0; i < cnt; i++) {
502 err = bge_nvram_getbyte(sc, off + i, &byte);
503 if (err)
504 break;
505 *(dest + i) = byte;
506 }
507
508 return (err ? 1 : 0);
509 }
510
511
512 /*
513 * Read a byte of data stored in the EEPROM at address 'addr.' The
514 * BCM570x supports both the traditional bitbang interface and an
515 * auto access interface for reading the EEPROM. We use the auto
516 * access method.
517 */
518 static u_int8_t
519 bge_eeprom_getbyte(struct bge_softc *sc, int addr, u_int8_t *dest)
520 {
521 int i;
522 u_int32_t byte = 0;
523
524 /*
525 * Enable use of auto EEPROM access so we can avoid
526 * having to use the bitbang method.
527 */
528 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);
529
530 /* Reset the EEPROM, load the clock period. */
531 CSR_WRITE_4(sc, BGE_EE_ADDR,
532 BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
533 DELAY(20);
534
535 /* Issue the read EEPROM command. */
536 CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);
537
538 /* Wait for completion */
539 for(i = 0; i < BGE_TIMEOUT * 10; i++) {
540 DELAY(10);
541 if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
542 break;
543 }
544
545 	if (i == BGE_TIMEOUT * 10) {
546 		aprint_error_dev(sc->bge_dev, "eeprom read timed out\n");
547 		return(1);
548 }
549
550 /* Get result. */
551 byte = CSR_READ_4(sc, BGE_EE_DATA);
552
553 *dest = (byte >> ((addr % 4) * 8)) & 0xFF;
554
555 return(0);
556 }
557
558 /*
559 * Read a sequence of bytes from the EEPROM.
560 */
561 static int
562 bge_read_eeprom(struct bge_softc *sc, void *destv, int off, int cnt)
563 {
564 int err = 0, i;
565 u_int8_t byte = 0;
566 char *dest = destv;
567
568 for (i = 0; i < cnt; i++) {
569 err = bge_eeprom_getbyte(sc, off + i, &byte);
570 if (err)
571 break;
572 *(dest + i) = byte;
573 }
574
575 return(err ? 1 : 0);
576 }
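
/*
 * A minimal usage sketch of the auto-access EEPROM path above: the factory
 * MAC address can be pulled in one call to bge_read_eeprom(), much as the
 * real bge_get_eaddr_eeprom() is expected to do.  The helper name below is
 * hypothetical and BGE_EE_MAC_OFFSET is assumed to be provided by
 * if_bgereg.h; the block is disabled (notdef) like the other illustrative
 * code in this file.
 */
#ifdef notdef
static int
bge_eeprom_eaddr_sketch(struct bge_softc *sc, u_int8_t eaddr[ETHER_ADDR_LEN])
{

	/* The station address lives two bytes into the MAC area. */
	return (bge_read_eeprom(sc, eaddr, BGE_EE_MAC_OFFSET + 2,
	    ETHER_ADDR_LEN));
}
#endif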
577
578 static int
579 bge_miibus_readreg(device_t dev, int phy, int reg)
580 {
581 struct bge_softc *sc = device_private(dev);
582 u_int32_t val;
583 u_int32_t saved_autopoll;
584 int i;
585
586 /*
587 * Several chips with builtin PHYs will incorrectly answer to
588 * other PHY instances than the builtin PHY at id 1.
589 */
590 if (phy != 1 && (sc->bge_quirks & BGE_QUIRK_ONLY_PHY_1))
591 return(0);
592
593 /* Reading with autopolling on may trigger PCI errors */
594 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
595 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
596 CSR_WRITE_4(sc, BGE_MI_MODE,
597 saved_autopoll &~ BGE_MIMODE_AUTOPOLL);
598 DELAY(40);
599 }
600
601 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY|
602 BGE_MIPHY(phy)|BGE_MIREG(reg));
603
604 for (i = 0; i < BGE_TIMEOUT; i++) {
605 val = CSR_READ_4(sc, BGE_MI_COMM);
606 if (!(val & BGE_MICOMM_BUSY))
607 break;
608 delay(10);
609 }
610
611 if (i == BGE_TIMEOUT) {
612 aprint_error_dev(sc->bge_dev, "PHY read timed out\n");
613 val = 0;
614 goto done;
615 }
616
617 val = CSR_READ_4(sc, BGE_MI_COMM);
618
619 done:
620 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
621 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
622 DELAY(40);
623 }
624
625 if (val & BGE_MICOMM_READFAIL)
626 return(0);
627
628 return(val & 0xFFFF);
629 }
630
631 static void
632 bge_miibus_writereg(device_t dev, int phy, int reg, int val)
633 {
634 struct bge_softc *sc = device_private(dev);
635 u_int32_t saved_autopoll;
636 int i;
637
638 	if (phy != 1) {
639 return;
640 }
641
642 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906 &&
643 (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) {
644 return;
645 }
646
647 /* Touching the PHY while autopolling is on may trigger PCI errors */
648 saved_autopoll = CSR_READ_4(sc, BGE_MI_MODE);
649 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
650 delay(40);
651 CSR_WRITE_4(sc, BGE_MI_MODE,
652 saved_autopoll & (~BGE_MIMODE_AUTOPOLL));
653 delay(10); /* 40 usec is supposed to be adequate */
654 }
655
656 CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY|
657 BGE_MIPHY(phy)|BGE_MIREG(reg)|val);
658
659 for (i = 0; i < BGE_TIMEOUT; i++) {
660 delay(10);
661 if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
662 delay(5);
663 CSR_READ_4(sc, BGE_MI_COMM);
664 break;
665 }
666 }
667
668 if (saved_autopoll & BGE_MIMODE_AUTOPOLL) {
669 CSR_WRITE_4(sc, BGE_MI_MODE, saved_autopoll);
670 delay(40);
671 }
672
673 if (i == BGE_TIMEOUT)
674 		aprint_error_dev(sc->bge_dev, "PHY write timed out\n");
675 }
676
677 static void
678 bge_miibus_statchg(device_t dev)
679 {
680 struct bge_softc *sc = device_private(dev);
681 struct mii_data *mii = &sc->bge_mii;
682
683 /*
684 * Get flow control negotiation result.
685 */
686 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
687 (mii->mii_media_active & IFM_ETH_FMASK) != sc->bge_flowflags) {
688 sc->bge_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
689 mii->mii_media_active &= ~IFM_ETH_FMASK;
690 }
691
692 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
693 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
694 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
695 } else {
696 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);
697 }
698
699 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
700 BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
701 } else {
702 BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
703 }
704
705 /*
706 * 802.3x flow control
707 */
708 if (sc->bge_flowflags & IFM_ETH_RXPAUSE) {
709 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
710 } else {
711 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_FLOWCTL_ENABLE);
712 }
713 if (sc->bge_flowflags & IFM_ETH_TXPAUSE) {
714 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
715 } else {
716 BGE_CLRBIT(sc, BGE_TX_MODE, BGE_TXMODE_FLOWCTL_ENABLE);
717 }
718 }
719
720 /*
721 * Update rx threshold levels to values in a particular slot
722 * of the interrupt-mitigation table bge_rx_threshes.
723 */
724 static void
725 bge_set_thresh(struct ifnet *ifp, int lvl)
726 {
727 struct bge_softc *sc = ifp->if_softc;
728 int s;
729
730 /* For now, just save the new Rx-intr thresholds and record
731 * that a threshold update is pending. Updating the hardware
732 * registers here (even at splhigh()) is observed to
733 	 * occasionally cause glitches where Rx-interrupts are not
734 * honoured for up to 10 seconds. jonathan (at) NetBSD.org, 2003-04-05
735 */
736 s = splnet();
737 sc->bge_rx_coal_ticks = bge_rx_threshes[lvl].rx_ticks;
738 sc->bge_rx_max_coal_bds = bge_rx_threshes[lvl].rx_max_bds;
739 sc->bge_pending_rxintr_change = 1;
740 splx(s);
741
742 return;
743 }
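
/*
 * Sketch only (hypothetical helper, disabled like the other notdef blocks
 * in this file): one way the deferred change recorded above could later be
 * pushed into the host-coalescing registers once it is safe to touch them.
 * The real driver consumes bge_pending_rxintr_change elsewhere; the
 * register names match those used in bge_blockinit().
 */
#ifdef notdef
static void
bge_flush_rxintr_change_sketch(struct bge_softc *sc)
{

	if (sc->bge_pending_rxintr_change == 0)
		return;
	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
	sc->bge_pending_rxintr_change = 0;
}
#endif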
744
745
746 /*
747 * Update Rx thresholds of all bge devices
748 */
749 static void
750 bge_update_all_threshes(int lvl)
751 {
752 struct ifnet *ifp;
753 const char * const namebuf = "bge";
754 int namelen;
755
756 if (lvl < 0)
757 lvl = 0;
758 	else if (lvl >= NBGE_RX_THRESH)
759 lvl = NBGE_RX_THRESH - 1;
760
761 namelen = strlen(namebuf);
762 /*
763 * Now search all the interfaces for this name/number
764 */
765 IFNET_FOREACH(ifp) {
766 if (strncmp(ifp->if_xname, namebuf, namelen) != 0)
767 continue;
768 /* We got a match: update if doing auto-threshold-tuning */
769 if (bge_auto_thresh)
770 bge_set_thresh(ifp, lvl);
771 }
772 }
773
774 /*
775 * Handle events that have triggered interrupts.
776 */
777 static void
778 bge_handle_events(struct bge_softc *sc)
779 {
780
781 return;
782 }
783
784 /*
785 * Memory management for jumbo frames.
786 */
787
788 static int
789 bge_alloc_jumbo_mem(struct bge_softc *sc)
790 {
791 char *ptr, *kva;
792 bus_dma_segment_t seg;
793 int i, rseg, state, error;
794 struct bge_jpool_entry *entry;
795
796 state = error = 0;
797
798 /* Grab a big chunk o' storage. */
799 if (bus_dmamem_alloc(sc->bge_dmatag, BGE_JMEM, PAGE_SIZE, 0,
800 &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
801 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
802 return ENOBUFS;
803 }
804
805 state = 1;
806 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg, BGE_JMEM, (void **)&kva,
807 BUS_DMA_NOWAIT)) {
808 aprint_error_dev(sc->bge_dev,
809 "can't map DMA buffers (%d bytes)\n", (int)BGE_JMEM);
810 error = ENOBUFS;
811 goto out;
812 }
813
814 state = 2;
815 if (bus_dmamap_create(sc->bge_dmatag, BGE_JMEM, 1, BGE_JMEM, 0,
816 BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_jumbo_map)) {
817 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
818 error = ENOBUFS;
819 goto out;
820 }
821
822 state = 3;
823 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
824 kva, BGE_JMEM, NULL, BUS_DMA_NOWAIT)) {
825 aprint_error_dev(sc->bge_dev, "can't load DMA map\n");
826 error = ENOBUFS;
827 goto out;
828 }
829
830 state = 4;
831 sc->bge_cdata.bge_jumbo_buf = (void *)kva;
832 DPRINTFN(1,("bge_jumbo_buf = %p\n", sc->bge_cdata.bge_jumbo_buf));
833
834 SLIST_INIT(&sc->bge_jfree_listhead);
835 SLIST_INIT(&sc->bge_jinuse_listhead);
836
837 /*
838 * Now divide it up into 9K pieces and save the addresses
839 * in an array.
840 */
841 ptr = sc->bge_cdata.bge_jumbo_buf;
842 for (i = 0; i < BGE_JSLOTS; i++) {
843 sc->bge_cdata.bge_jslots[i] = ptr;
844 ptr += BGE_JLEN;
845 entry = malloc(sizeof(struct bge_jpool_entry),
846 M_DEVBUF, M_NOWAIT);
847 if (entry == NULL) {
848 aprint_error_dev(sc->bge_dev,
849 "no memory for jumbo buffer queue!\n");
850 error = ENOBUFS;
851 goto out;
852 }
853 entry->slot = i;
854 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead,
855 entry, jpool_entries);
856 }
857 out:
858 if (error != 0) {
859 switch (state) {
860 case 4:
861 bus_dmamap_unload(sc->bge_dmatag,
862 sc->bge_cdata.bge_rx_jumbo_map);
863 case 3:
864 bus_dmamap_destroy(sc->bge_dmatag,
865 sc->bge_cdata.bge_rx_jumbo_map);
866 case 2:
867 bus_dmamem_unmap(sc->bge_dmatag, kva, BGE_JMEM);
868 case 1:
869 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
870 break;
871 default:
872 break;
873 }
874 }
875
876 return error;
877 }
878
879 /*
880 * Allocate a jumbo buffer.
881 */
882 static void *
883 bge_jalloc(struct bge_softc *sc)
884 {
885 struct bge_jpool_entry *entry;
886
887 entry = SLIST_FIRST(&sc->bge_jfree_listhead);
888
889 if (entry == NULL) {
890 aprint_error_dev(sc->bge_dev, "no free jumbo buffers\n");
891 return(NULL);
892 }
893
894 SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries);
895 SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries);
896 return(sc->bge_cdata.bge_jslots[entry->slot]);
897 }
898
899 /*
900 * Release a jumbo buffer.
901 */
902 static void
903 bge_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
904 {
905 struct bge_jpool_entry *entry;
906 struct bge_softc *sc;
907 int i, s;
908
909 /* Extract the softc struct pointer. */
910 sc = (struct bge_softc *)arg;
911
912 if (sc == NULL)
913 panic("bge_jfree: can't find softc pointer!");
914
915 /* calculate the slot this buffer belongs to */
916
917 i = ((char *)buf
918 - (char *)sc->bge_cdata.bge_jumbo_buf) / BGE_JLEN;
919
920 if ((i < 0) || (i >= BGE_JSLOTS))
921 panic("bge_jfree: asked to free buffer that we don't manage!");
922
923 s = splvm();
924 entry = SLIST_FIRST(&sc->bge_jinuse_listhead);
925 if (entry == NULL)
926 panic("bge_jfree: buffer not in use!");
927 entry->slot = i;
928 SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries);
929 SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries);
930
931 if (__predict_true(m != NULL))
932 pool_cache_put(mb_cache, m);
933 splx(s);
934 }
935
936
937 /*
938  * Initialize a standard receive ring descriptor.
939 */
940 static int
941 bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m, bus_dmamap_t dmamap)
942 {
943 struct mbuf *m_new = NULL;
944 struct bge_rx_bd *r;
945 int error;
946
947 if (dmamap == NULL) {
948 error = bus_dmamap_create(sc->bge_dmatag, MCLBYTES, 1,
949 MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap);
950 if (error != 0)
951 return error;
952 }
953
954 sc->bge_cdata.bge_rx_std_map[i] = dmamap;
955
956 if (m == NULL) {
957 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
958 if (m_new == NULL) {
959 return(ENOBUFS);
960 }
961
962 MCLGET(m_new, M_DONTWAIT);
963 if (!(m_new->m_flags & M_EXT)) {
964 m_freem(m_new);
965 return(ENOBUFS);
966 }
967 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
968
969 } else {
970 m_new = m;
971 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
972 m_new->m_data = m_new->m_ext.ext_buf;
973 }
974 if (!sc->bge_rx_alignment_bug)
975 m_adj(m_new, ETHER_ALIGN);
976 if (bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_new,
977 BUS_DMA_READ|BUS_DMA_NOWAIT))
978 return(ENOBUFS);
979 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
980 BUS_DMASYNC_PREREAD);
981
982 sc->bge_cdata.bge_rx_std_chain[i] = m_new;
983 r = &sc->bge_rdata->bge_rx_std_ring[i];
984 bge_set_hostaddr(&r->bge_addr,
985 dmamap->dm_segs[0].ds_addr);
986 r->bge_flags = BGE_RXBDFLAG_END;
987 r->bge_len = m_new->m_len;
988 r->bge_idx = i;
989
990 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
991 offsetof(struct bge_ring_data, bge_rx_std_ring) +
992 i * sizeof (struct bge_rx_bd),
993 sizeof (struct bge_rx_bd),
994 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
995
996 return(0);
997 }
998
999 /*
1000 * Initialize a jumbo receive ring descriptor. This allocates
1001 * a jumbo buffer from the pool managed internally by the driver.
1002 */
1003 static int
1004 bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
1005 {
1006 struct mbuf *m_new = NULL;
1007 struct bge_rx_bd *r;
1008 void *buf = NULL;
1009
1010 if (m == NULL) {
1011
1012 /* Allocate the mbuf. */
1013 MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1014 if (m_new == NULL) {
1015 return(ENOBUFS);
1016 }
1017
1018 /* Allocate the jumbo buffer */
1019 buf = bge_jalloc(sc);
1020 if (buf == NULL) {
1021 m_freem(m_new);
1022 aprint_error_dev(sc->bge_dev,
1023 "jumbo allocation failed -- packet dropped!\n");
1024 return(ENOBUFS);
1025 }
1026
1027 /* Attach the buffer to the mbuf. */
1028 m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN;
1029 MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, M_DEVBUF,
1030 bge_jfree, sc);
1031 m_new->m_flags |= M_EXT_RW;
1032 } else {
1033 m_new = m;
1034 buf = m_new->m_data = m_new->m_ext.ext_buf;
1035 m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN;
1036 }
1037 if (!sc->bge_rx_alignment_bug)
1038 m_adj(m_new, ETHER_ALIGN);
1039 bus_dmamap_sync(sc->bge_dmatag, sc->bge_cdata.bge_rx_jumbo_map,
1040 mtod(m_new, char *) - (char *)sc->bge_cdata.bge_jumbo_buf, BGE_JLEN,
1041 BUS_DMASYNC_PREREAD);
1042 /* Set up the descriptor. */
1043 r = &sc->bge_rdata->bge_rx_jumbo_ring[i];
1044 sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;
1045 bge_set_hostaddr(&r->bge_addr, BGE_JUMBO_DMA_ADDR(sc, m_new));
1046 r->bge_flags = BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING;
1047 r->bge_len = m_new->m_len;
1048 r->bge_idx = i;
1049
1050 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1051 offsetof(struct bge_ring_data, bge_rx_jumbo_ring) +
1052 i * sizeof (struct bge_rx_bd),
1053 sizeof (struct bge_rx_bd),
1054 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1055
1056 return(0);
1057 }
1058
1059 /*
1060 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
1061  * that's 1MB of memory, which is a lot. For now, we fill only the first
1062 * 256 ring entries and hope that our CPU is fast enough to keep up with
1063 * the NIC.
1064 */
1065 static int
1066 bge_init_rx_ring_std(struct bge_softc *sc)
1067 {
1068 int i;
1069
1070 if (sc->bge_flags & BGE_RXRING_VALID)
1071 return 0;
1072
1073 for (i = 0; i < BGE_SSLOTS; i++) {
1074 if (bge_newbuf_std(sc, i, NULL, 0) == ENOBUFS)
1075 return(ENOBUFS);
1076 }
1077
1078 sc->bge_std = i - 1;
1079 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
1080
1081 sc->bge_flags |= BGE_RXRING_VALID;
1082
1083 return(0);
1084 }
1085
1086 static void
1087 bge_free_rx_ring_std(struct bge_softc *sc)
1088 {
1089 int i;
1090
1091 if (!(sc->bge_flags & BGE_RXRING_VALID))
1092 return;
1093
1094 for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
1095 if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
1096 m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
1097 sc->bge_cdata.bge_rx_std_chain[i] = NULL;
1098 bus_dmamap_destroy(sc->bge_dmatag,
1099 sc->bge_cdata.bge_rx_std_map[i]);
1100 }
1101 memset((char *)&sc->bge_rdata->bge_rx_std_ring[i], 0,
1102 sizeof(struct bge_rx_bd));
1103 }
1104
1105 sc->bge_flags &= ~BGE_RXRING_VALID;
1106 }
1107
1108 static int
1109 bge_init_rx_ring_jumbo(struct bge_softc *sc)
1110 {
1111 int i;
1112 volatile struct bge_rcb *rcb;
1113
1114 if (sc->bge_flags & BGE_JUMBO_RXRING_VALID)
1115 return 0;
1116
1117 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1118 if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
1119 return(ENOBUFS);
1120 	}
1121
1122 sc->bge_jumbo = i - 1;
1123 sc->bge_flags |= BGE_JUMBO_RXRING_VALID;
1124
1125 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1126 rcb->bge_maxlen_flags = 0;
1127 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1128
1129 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
1130
1131 return(0);
1132 }
1133
1134 static void
1135 bge_free_rx_ring_jumbo(struct bge_softc *sc)
1136 {
1137 int i;
1138
1139 if (!(sc->bge_flags & BGE_JUMBO_RXRING_VALID))
1140 return;
1141
1142 for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
1143 if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) {
1144 m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]);
1145 sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL;
1146 }
1147 memset((char *)&sc->bge_rdata->bge_rx_jumbo_ring[i], 0,
1148 sizeof(struct bge_rx_bd));
1149 }
1150
1151 sc->bge_flags &= ~BGE_JUMBO_RXRING_VALID;
1152 }
1153
1154 static void
1155 bge_free_tx_ring(struct bge_softc *sc)
1156 {
1157 int i, freed;
1158 struct txdmamap_pool_entry *dma;
1159
1160 if (!(sc->bge_flags & BGE_TXRING_VALID))
1161 return;
1162
1163 freed = 0;
1164
1165 for (i = 0; i < BGE_TX_RING_CNT; i++) {
1166 if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
1167 freed++;
1168 m_freem(sc->bge_cdata.bge_tx_chain[i]);
1169 sc->bge_cdata.bge_tx_chain[i] = NULL;
1170 SLIST_INSERT_HEAD(&sc->txdma_list, sc->txdma[i],
1171 link);
1172 sc->txdma[i] = 0;
1173 }
1174 memset((char *)&sc->bge_rdata->bge_tx_ring[i], 0,
1175 sizeof(struct bge_tx_bd));
1176 }
1177
1178 while ((dma = SLIST_FIRST(&sc->txdma_list))) {
1179 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
1180 bus_dmamap_destroy(sc->bge_dmatag, dma->dmamap);
1181 free(dma, M_DEVBUF);
1182 }
1183
1184 sc->bge_flags &= ~BGE_TXRING_VALID;
1185 }
1186
1187 static int
1188 bge_init_tx_ring(struct bge_softc *sc)
1189 {
1190 int i;
1191 bus_dmamap_t dmamap;
1192 struct txdmamap_pool_entry *dma;
1193
1194 if (sc->bge_flags & BGE_TXRING_VALID)
1195 return 0;
1196
1197 sc->bge_txcnt = 0;
1198 sc->bge_tx_saved_considx = 0;
1199
1200 /* Initialize transmit producer index for host-memory send ring. */
1201 sc->bge_tx_prodidx = 0;
1202 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1203 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1204 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx);
1205
1206 /* NIC-memory send ring not used; initialize to zero. */
1207 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1208 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
1209 bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0);
1210
1211 SLIST_INIT(&sc->txdma_list);
1212 for (i = 0; i < BGE_RSLOTS; i++) {
1213 if (bus_dmamap_create(sc->bge_dmatag, BGE_TXDMA_MAX,
1214 BGE_NTXSEG, ETHER_MAX_LEN_JUMBO, 0, BUS_DMA_NOWAIT,
1215 &dmamap))
1216 return(ENOBUFS);
1217 if (dmamap == NULL)
1218 panic("dmamap NULL in bge_init_tx_ring");
1219 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT);
1220 if (dma == NULL) {
1221 aprint_error_dev(sc->bge_dev,
1222 "can't alloc txdmamap_pool_entry\n");
1223 bus_dmamap_destroy(sc->bge_dmatag, dmamap);
1224 return (ENOMEM);
1225 }
1226 dma->dmamap = dmamap;
1227 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
1228 }
1229
1230 sc->bge_flags |= BGE_TXRING_VALID;
1231
1232 return(0);
1233 }
1234
1235 static void
1236 bge_setmulti(struct bge_softc *sc)
1237 {
1238 struct ethercom *ac = &sc->ethercom;
1239 struct ifnet *ifp = &ac->ec_if;
1240 struct ether_multi *enm;
1241 struct ether_multistep step;
1242 u_int32_t hashes[4] = { 0, 0, 0, 0 };
1243 u_int32_t h;
1244 int i;
1245
1246 if (ifp->if_flags & IFF_PROMISC)
1247 goto allmulti;
1248
1249 /* Now program new ones. */
1250 ETHER_FIRST_MULTI(step, ac, enm);
1251 while (enm != NULL) {
1252 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1253 /*
1254 * We must listen to a range of multicast addresses.
1255 * For now, just accept all multicasts, rather than
1256 * trying to set only those filter bits needed to match
1257 * the range. (At this time, the only use of address
1258 * ranges is for IP multicast routing, for which the
1259 * range is big enough to require all bits set.)
1260 */
1261 goto allmulti;
1262 }
1263
1264 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);
1265
1266 /* Just want the 7 least-significant bits. */
1267 h &= 0x7f;
1268
1269 hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
1270 ETHER_NEXT_MULTI(step, enm);
1271 }
1272
1273 ifp->if_flags &= ~IFF_ALLMULTI;
1274 goto setit;
1275
1276 allmulti:
1277 ifp->if_flags |= IFF_ALLMULTI;
1278 hashes[0] = hashes[1] = hashes[2] = hashes[3] = 0xffffffff;
1279
1280 setit:
1281 for (i = 0; i < 4; i++)
1282 CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
1283 }
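
/*
 * Sketch only (hypothetical helper, disabled): the hash computed in
 * bge_setmulti() above, broken into its two components -- bits 6:5 of the
 * 7-bit CRC pick one of the four 32-bit BGE_MAR hash registers and bits
 * 4:0 pick the bit within that register.
 */
#ifdef notdef
static void
bge_mchash_sketch(struct bge_softc *sc, const u_int8_t *addr)
{
	u_int32_t h, reg, bit;

	h = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0x7f;
	reg = BGE_MAR0 + ((h & 0x60) >> 5) * 4;	/* which hash register */
	bit = 1 << (h & 0x1f);			/* which bit within it */
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | bit);
}
#endif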
1284
1285 const int bge_swapbits[] = {
1286 0,
1287 BGE_MODECTL_BYTESWAP_DATA,
1288 BGE_MODECTL_WORDSWAP_DATA,
1289 BGE_MODECTL_BYTESWAP_NONFRAME,
1290 BGE_MODECTL_WORDSWAP_NONFRAME,
1291
1292 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA,
1293 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1294 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1295
1296 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME,
1297 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_WORDSWAP_NONFRAME,
1298
1299 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1300
1301 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1302 BGE_MODECTL_BYTESWAP_NONFRAME,
1303 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1304 BGE_MODECTL_WORDSWAP_NONFRAME,
1305 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1306 BGE_MODECTL_WORDSWAP_NONFRAME,
1307 BGE_MODECTL_WORDSWAP_DATA|BGE_MODECTL_BYTESWAP_NONFRAME|
1308 BGE_MODECTL_WORDSWAP_NONFRAME,
1309
1310 BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA|
1311 BGE_MODECTL_BYTESWAP_NONFRAME|BGE_MODECTL_WORDSWAP_NONFRAME,
1312 };
1313
1314 int bge_swapindex = 0;
1315
1316 /*
1317 * Do endian, PCI and DMA initialization. Also check the on-board ROM
1318 * self-test results.
1319 */
1320 static int
1321 bge_chipinit(struct bge_softc *sc)
1322 {
1323 u_int32_t cachesize;
1324 int i;
1325 u_int32_t dma_rw_ctl;
1326
1327
1328 /* Set endianness before we access any non-PCI registers. */
1329 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
1330 BGE_INIT);
1331
1332 /* Set power state to D0. */
1333 bge_setpowerstate(sc, 0);
1334
1335 /*
1336 * Check the 'ROM failed' bit on the RX CPU to see if
1337 * self-tests passed.
1338 */
1339 if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) {
1340 aprint_error_dev(sc->bge_dev,
1341 "RX CPU self-diagnostics failed!\n");
1342 return(ENODEV);
1343 }
1344
1345 /* Clear the MAC control register */
1346 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
1347
1348 /*
1349 * Clear the MAC statistics block in the NIC's
1350 * internal memory.
1351 */
1352 for (i = BGE_STATS_BLOCK;
1353 i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t))
1354 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1355
1356 for (i = BGE_STATUS_BLOCK;
1357 i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t))
1358 BGE_MEMWIN_WRITE(sc->sc_pc, sc->sc_pcitag, i, 0);
1359
1360 /* Set up the PCI DMA control register. */
1361 if (sc->bge_pcie) {
1362 u_int32_t device_ctl;
1363
1364 /* From FreeBSD */
1365 DPRINTFN(4, ("(%s: PCI-Express DMA setting)\n",
1366 device_xname(sc->bge_dev)));
1367 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1368 (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1369 (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
1370
1371 /* jonathan: alternative from Linux driver */
1372 #define DMA_CTRL_WRITE_PCIE_H20MARK_128 0x00180000
1373 #define DMA_CTRL_WRITE_PCIE_H20MARK_256 0x00380000
1374
1375 		dma_rw_ctl = 0x76000000; /* XXX XXX XXX */
1376 device_ctl = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
1377 BGE_PCI_CONF_DEV_CTRL);
1378 aprint_debug_dev(sc->bge_dev, "pcie mode=0x%x\n", device_ctl);
1379
1380 if ((device_ctl & 0x00e0) && 0) {
1381 /*
1382 * XXX jonathan (at) NetBSD.org:
1383 * This clause is exactly what the Broadcom-supplied
1384 * Linux does; but given overall register programming
1385 * by if_bge(4), this larger DMA-write watermark
1386 * value causes bcm5721 chips to totally wedge.
1387 */
1388 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_256;
1389 } else {
1390 dma_rw_ctl |= BGE_PCIDMA_RWCTL_PCIE_WRITE_WATRMARK_128;
1391 }
1392 	} else if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
1393 BGE_PCISTATE_PCI_BUSMODE) {
1394 /* Conventional PCI bus */
1395 DPRINTFN(4, ("(%s: PCI 2.2 DMA setting)\n",
1396 device_xname(sc->bge_dev)));
1397 dma_rw_ctl = (BGE_PCI_READ_CMD | BGE_PCI_WRITE_CMD |
1398 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1399 (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT));
1400 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1401 dma_rw_ctl |= 0x0F;
1402 }
1403 } else {
1404 		DPRINTFN(4, ("(%s: PCI-X DMA setting)\n",
1405 device_xname(sc->bge_dev)));
1406 /* PCI-X bus */
1407 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1408 (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1409 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) |
1410 (0x0F);
1411 /*
1412 * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround
1413 * for hardware bugs, which means we should also clear
1414 * the low-order MINDMA bits. In addition, the 5704
1415 * uses a different encoding of read/write watermarks.
1416 */
1417 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5704) {
1418 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD |
1419 /* should be 0x1f0000 */
1420 (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) |
1421 (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT);
1422 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1423 }
1424 else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5703) {
1425 dma_rw_ctl &= 0xfffffff0;
1426 dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1427 }
1428 else if (BGE_IS_5714_FAMILY(sc)) {
1429 dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD;
1430 dma_rw_ctl &= ~BGE_PCIDMARWCTL_ONEDMA_ATONCE; /* XXX */
1431 /* XXX magic values, Broadcom-supplied Linux driver */
1432 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5780)
1433 dma_rw_ctl |= (1 << 20) | (1 << 18) |
1434 BGE_PCIDMARWCTL_ONEDMA_ATONCE;
1435 else
1436 dma_rw_ctl |= (1<<20) | (1<<18) | (1 << 15);
1437 }
1438 }
1439
1440 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL, dma_rw_ctl);
1441
1442 /*
1443 * Set up general mode register.
1444 */
1445 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS|
1446 BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS|
1447 BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM);
1448
1449 /* Get cache line size. */
1450 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
1451
1452 /*
1453 * Avoid violating PCI spec on certain chip revs.
1454 */
1455 if (pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD) &
1456 PCIM_CMD_MWIEN) {
1457 switch(cachesize) {
1458 case 1:
1459 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1460 BGE_PCI_WRITE_BNDRY_16BYTES);
1461 break;
1462 case 2:
1463 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1464 BGE_PCI_WRITE_BNDRY_32BYTES);
1465 break;
1466 case 4:
1467 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1468 BGE_PCI_WRITE_BNDRY_64BYTES);
1469 break;
1470 case 8:
1471 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1472 BGE_PCI_WRITE_BNDRY_128BYTES);
1473 break;
1474 case 16:
1475 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1476 BGE_PCI_WRITE_BNDRY_256BYTES);
1477 break;
1478 case 32:
1479 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1480 BGE_PCI_WRITE_BNDRY_512BYTES);
1481 break;
1482 case 64:
1483 PCI_SETBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_DMA_RW_CTL,
1484 BGE_PCI_WRITE_BNDRY_1024BYTES);
1485 break;
1486 default:
1487 /* Disable PCI memory write and invalidate. */
1488 #if 0
1489 			if (bootverbose)
1490 				aprint_error_dev(sc->bge_dev,
1491 				    "cache line size %d not supported; "
1492 				    "disabling PCI MWI\n", cachesize);
1493 #endif
1494 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD,
1495 PCIM_CMD_MWIEN);
1496 break;
1497 }
1498 }
1499
1500 /*
1501 * Disable memory write invalidate. Apparently it is not supported
1502 * properly by these devices.
1503 */
1504 PCI_CLRBIT(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, PCIM_CMD_MWIEN);
1505
1506
1507 #ifdef __brokenalpha__
1508 /*
1509 	 * Must ensure that we do not cross an 8K (bytes) boundary
1510 * for DMA reads. Our highest limit is 1K bytes. This is a
1511 * restriction on some ALPHA platforms with early revision
1512 * 21174 PCI chipsets, such as the AlphaPC 164lx
1513 */
1514 PCI_SETBIT(sc, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024, 4);
1515 #endif
1516
1517 /* Set the timer prescaler (always 66MHz) */
1518 CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/);
1519
1520 return(0);
1521 }
1522
1523 static int
1524 bge_blockinit(struct bge_softc *sc)
1525 {
1526 volatile struct bge_rcb *rcb;
1527 bus_size_t rcb_addr;
1528 int i;
1529 struct ifnet *ifp = &sc->ethercom.ec_if;
1530 bge_hostaddr taddr;
1531
1532 /*
1533 * Initialize the memory window pointer register so that
1534 * we can access the first 32K of internal NIC RAM. This will
1535 * allow us to set up the TX send ring RCBs and the RX return
1536 * ring RCBs, plus other things which live in NIC memory.
1537 */
1538
1539 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MEMWIN_BASEADDR, 0);
1540
1541 /* Configure mbuf memory pool */
1542 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1543 if (sc->bge_extram) {
1544 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1545 BGE_EXT_SSRAM);
1546 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1547 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1548 else
1549 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1550 } else {
1551 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR,
1552 BGE_BUFFPOOL_1);
1553 if ((sc->bge_quirks & BGE_QUIRK_FEWER_MBUFS) != 0)
1554 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000);
1555 else
1556 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000);
1557 }
1558
1559 /* Configure DMA resource pool */
1560 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR,
1561 BGE_DMA_DESCRIPTORS);
1562 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000);
1563 }
1564
1565 /* Configure mbuf pool watermarks */
1566 #ifdef ORIG_WPAUL_VALUES
1567 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 24);
1568 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 24);
1569 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 48);
1570 #else
1571 /* new broadcom docs strongly recommend these: */
1572 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1573 if (ifp->if_mtu > ETHER_MAX_LEN) {
1574 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50);
1575 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20);
1576 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1577 } else if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
1578 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1579 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04);
1580 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10);
1581 } else {
1582 /* Values from Linux driver... */
1583 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 304);
1584 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 152);
1585 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 380);
1586 }
1587 } else {
1588 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
1589 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10);
1590 CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60);
1591 }
1592 #endif
1593
1594 /* Configure DMA resource watermarks */
1595 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5);
1596 CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10);
1597
1598 /* Enable buffer manager */
1599 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1600 CSR_WRITE_4(sc, BGE_BMAN_MODE,
1601 BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN);
1602
1603 /* Poll for buffer manager start indication */
1604 for (i = 0; i < BGE_TIMEOUT; i++) {
1605 if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE)
1606 break;
1607 DELAY(10);
1608 }
1609
1610 if (i == BGE_TIMEOUT) {
1611 aprint_error_dev(sc->bge_dev,
1612 "buffer manager failed to start\n");
1613 return(ENXIO);
1614 }
1615 }
1616
1617 /* Enable flow-through queues */
1618 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
1619 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
1620
1621 /* Wait until queue initialization is complete */
1622 for (i = 0; i < BGE_TIMEOUT; i++) {
1623 if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0)
1624 break;
1625 DELAY(10);
1626 }
1627
1628 if (i == BGE_TIMEOUT) {
1629 aprint_error_dev(sc->bge_dev,
1630 "flow-through queue init failed\n");
1631 return(ENXIO);
1632 }
1633
1634 /* Initialize the standard RX ring control block */
1635 rcb = &sc->bge_rdata->bge_info.bge_std_rx_rcb;
1636 bge_set_hostaddr(&rcb->bge_hostaddr,
1637 BGE_RING_DMA_ADDR(sc, bge_rx_std_ring));
1638 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1639 rcb->bge_maxlen_flags =
1640 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0);
1641 } else {
1642 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0);
1643 }
1644 if (sc->bge_extram)
1645 rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS;
1646 else
1647 rcb->bge_nicaddr = BGE_STD_RX_RINGS;
1648 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi);
1649 CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo);
1650 CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
1651 CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);
1652
1653 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1654 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT;
1655 } else {
1656 sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705;
1657 }
1658
1659 /*
1660 * Initialize the jumbo RX ring control block
1661 * We set the 'ring disabled' bit in the flags
1662 * field until we're actually ready to start
1663 * using this ring (i.e. once we set the MTU
1664 * high enough to require it).
1665 */
1666 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1667 rcb = &sc->bge_rdata->bge_info.bge_jumbo_rx_rcb;
1668 bge_set_hostaddr(&rcb->bge_hostaddr,
1669 BGE_RING_DMA_ADDR(sc, bge_rx_jumbo_ring));
1670 rcb->bge_maxlen_flags =
1671 BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN,
1672 BGE_RCB_FLAG_RING_DISABLED);
1673 if (sc->bge_extram)
1674 rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS;
1675 else
1676 rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
1677
1678 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
1679 rcb->bge_hostaddr.bge_addr_hi);
1680 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
1681 rcb->bge_hostaddr.bge_addr_lo);
1682 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
1683 rcb->bge_maxlen_flags);
1684 CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
1685
1686 /* Set up dummy disabled mini ring RCB */
1687 rcb = &sc->bge_rdata->bge_info.bge_mini_rx_rcb;
1688 rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
1689 BGE_RCB_FLAG_RING_DISABLED);
1690 CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
1691 rcb->bge_maxlen_flags);
1692
1693 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
1694 offsetof(struct bge_ring_data, bge_info),
1695 sizeof (struct bge_gib),
1696 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1697 }
1698
1699 /*
1700 * Set the BD ring replenish thresholds. The recommended
1701 * values are 1/8th the number of descriptors allocated to
1702 * each ring.
1703 */
1704 i = BGE_STD_RX_RING_CNT / 8;
1705
1706 /*
1707 * Use a value of 8 for the following chips to workaround HW errata.
1708 * Some of these chips have been added based on empirical
1709 * evidence (they don't work unless this is done).
1710 */
1711 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5750 ||
1712 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
1713 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1714 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787 ||
1715 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906)
1716 i = 8;
1717
1718 CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, i);
1719 CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8);
1720
1721 /*
1722 * Disable all unused send rings by setting the 'ring disabled'
1723 * bit in the flags field of all the TX send ring control blocks.
1724 * These are located in NIC memory.
1725 */
1726 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1727 for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) {
1728 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1729 BGE_RCB_MAXLEN_FLAGS(0,BGE_RCB_FLAG_RING_DISABLED));
1730 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1731 rcb_addr += sizeof(struct bge_rcb);
1732 }
1733
1734 /* Configure TX RCB 0 (we use only the first ring) */
1735 rcb_addr = BGE_MEMWIN_START + BGE_SEND_RING_RCB;
1736 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_tx_ring));
1737 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1738 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1739 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr,
1740 BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT));
1741 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1742 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1743 BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0));
1744 }
1745
1746 /* Disable all unused RX return rings */
1747 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1748 for (i = 0; i < BGE_RX_RINGS_MAX; i++) {
1749 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, 0);
1750 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, 0);
1751 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1752 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt,
1753 BGE_RCB_FLAG_RING_DISABLED));
1754 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0);
1755 bge_writembx(sc, BGE_MBX_RX_CONS0_LO +
1756 (i * (sizeof(u_int64_t))), 0);
1757 rcb_addr += sizeof(struct bge_rcb);
1758 }
1759
1760 /* Initialize RX ring indexes */
1761 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);
1762 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
1763 bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
1764
1765 /*
1766 * Set up RX return ring 0
1767 * Note that the NIC address for RX return rings is 0x00000000.
1768 * The return rings live entirely within the host, so the
1769 * nicaddr field in the RCB isn't used.
1770 */
1771 rcb_addr = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB;
1772 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_rx_return_ring));
1773 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi);
1774 RCB_WRITE_4(sc, rcb_addr, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo);
1775 RCB_WRITE_4(sc, rcb_addr, bge_nicaddr, 0x00000000);
1776 RCB_WRITE_4(sc, rcb_addr, bge_maxlen_flags,
1777 BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0));
1778
1779 /* Set random backoff seed for TX */
1780 CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF,
1781 CLLADDR(ifp->if_sadl)[0] + CLLADDR(ifp->if_sadl)[1] +
1782 CLLADDR(ifp->if_sadl)[2] + CLLADDR(ifp->if_sadl)[3] +
1783 CLLADDR(ifp->if_sadl)[4] + CLLADDR(ifp->if_sadl)[5] +
1784 BGE_TX_BACKOFF_SEED_MASK);
1785
1786 /* Set inter-packet gap */
1787 CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620);
1788
1789 /*
1790 * Specify which ring to use for packets that don't match
1791 * any RX rules.
1792 */
1793 CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08);
1794
1795 /*
1796 * Configure number of RX lists. One interrupt distribution
1797 * list, sixteen active lists, one bad frames class.
1798 */
1799 CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181);
1800
1801 	/* Initialize RX list placement stats mask. */
1802 CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF);
1803 CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1);
1804
1805 /* Disable host coalescing until we get it set up */
1806 CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000);
1807
1808 /* Poll to make sure it's shut down. */
1809 for (i = 0; i < BGE_TIMEOUT; i++) {
1810 if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE))
1811 break;
1812 DELAY(10);
1813 }
1814
1815 if (i == BGE_TIMEOUT) {
1816 aprint_error_dev(sc->bge_dev,
1817 "host coalescing engine failed to idle\n");
1818 return(ENXIO);
1819 }
1820
1821 /* Set up host coalescing defaults */
1822 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks);
1823 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks);
1824 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds);
1825 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds);
1826 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1827 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0);
1828 CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0);
1829 }
1830 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0);
1831 CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0);
1832
1833 /* Set up address of statistics block */
1834 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1835 bge_set_hostaddr(&taddr,
1836 BGE_RING_DMA_ADDR(sc, bge_info.bge_stats));
1837 CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks);
1838 CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK);
1839 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, taddr.bge_addr_hi);
1840 CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, taddr.bge_addr_lo);
1841 }
1842
1843 /* Set up address of status block */
1844 bge_set_hostaddr(&taddr, BGE_RING_DMA_ADDR(sc, bge_status_block));
1845 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK);
1846 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, taddr.bge_addr_hi);
1847 CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, taddr.bge_addr_lo);
1848 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx = 0;
1849 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx = 0;
1850
1851 /* Turn on host coalescing state machine */
1852 CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
1853
1854 /* Turn on RX BD completion state machine and enable attentions */
1855 CSR_WRITE_4(sc, BGE_RBDC_MODE,
1856 BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN);
1857
1858 /* Turn on RX list placement state machine */
1859 CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
1860
1861 /* Turn on RX list selector state machine. */
1862 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1863 CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
1864 }
1865
1866 /* Turn on DMA, clear stats */
1867 CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB|
1868 BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR|
1869 BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB|
1870 BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB|
1871 (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII));
1872
1873 /* Set misc. local control, enable interrupts on attentions */
1874 sc->bge_local_ctrl_reg = BGE_MLC_INTR_ONATTN | BGE_MLC_AUTO_EEPROM;
1875
1876 #ifdef notdef
1877 /* Assert GPIO pins for PHY reset */
1878 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0|
1879 BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2);
1880 BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0|
1881 BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2);
1882 #endif
1883
1884 #if defined(not_quite_yet)
1885 	/* Linux driver enables gpio pin #1 on 5700s */
1886 if (sc->bge_chipid == BGE_CHIPID_BCM5700) {
1887 sc->bge_local_ctrl_reg |=
1888 (BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUTEN1);
1889 }
1890 #endif
1891 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
1892
1893 /* Turn on DMA completion state machine */
1894 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1895 CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
1896 }
1897
1898 /* Turn on write DMA state machine */
1899 {
1900 uint32_t bge_wdma_mode =
1901 BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS;
1902
1903 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
1904 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
1905 /* Enable host coalescing bug fix; see Linux tg3.c */
1906 bge_wdma_mode |= (1 << 29);
1907
1908 CSR_WRITE_4(sc, BGE_WDMA_MODE, bge_wdma_mode);
1909 }
1910
1911 /* Turn on read DMA state machine */
1912 {
1913 uint32_t dma_read_modebits;
1914
1915 dma_read_modebits =
1916 BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS;
1917
1918 if (sc->bge_pcie && 0) {
1919 dma_read_modebits |= BGE_RDMA_MODE_FIFO_LONG_BURST;
1920 } else if ((sc->bge_quirks & BGE_QUIRK_5705_CORE)) {
1921 dma_read_modebits |= BGE_RDMA_MODE_FIFO_SIZE_128;
1922 }
1923
1924 /* XXX broadcom-supplied linux driver; undocumented */
1925 if (BGE_IS_5750_OR_BEYOND(sc)) {
1926 /*
1927 * XXX: magic values.
1928 * From Broadcom-supplied Linux driver; apparently
1929 		 * required to work around a DMA bug affecting TSO
1930 * on bcm575x/bcm5721?
1931 */
1932 dma_read_modebits |= (1 << 27);
1933 }
1934 CSR_WRITE_4(sc, BGE_RDMA_MODE, dma_read_modebits);
1935 }
1936
1937 /* Turn on RX data completion state machine */
1938 CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
1939
1940 /* Turn on RX BD initiator state machine */
1941 CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
1942
1943 /* Turn on RX data and RX BD initiator state machine */
1944 CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE);
1945
1946 /* Turn on Mbuf cluster free state machine */
1947 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
1948 CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
1949 }
1950
1951 /* Turn on send BD completion state machine */
1952 CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
1953
1954 /* Turn on send data completion state machine */
1955 CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
1956
1957 /* Turn on send data initiator state machine */
1958 if (BGE_IS_5750_OR_BEYOND(sc)) {
1959 /* XXX: magic value from Linux driver */
1960 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | 0x08);
1961 } else {
1962 CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
1963 }
1964
1965 /* Turn on send BD initiator state machine */
1966 CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
1967
1968 /* Turn on send BD selector state machine */
1969 CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
1970
1971 CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF);
1972 CSR_WRITE_4(sc, BGE_SDI_STATS_CTL,
1973 BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER);
1974
1975 /* ack/clear link change events */
1976 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
1977 BGE_MACSTAT_CFG_CHANGED);
1978 CSR_WRITE_4(sc, BGE_MI_STS, 0);
1979
1980 /* Enable PHY auto polling (for MII/GMII only) */
1981 if (sc->bge_tbi) {
1982 CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK);
1983 } else {
1984 BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16);
1985 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN)
1986 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
1987 BGE_EVTENB_MI_INTERRUPT);
1988 }
1989
1990 /* Enable link state change attentions. */
1991 BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED);
1992
1993 return(0);
1994 }
1995
1996 static const struct bge_revision {
1997 uint32_t br_chipid;
1998 uint32_t br_quirks;
1999 const char *br_name;
2000 } bge_revisions[] = {
2001 { BGE_CHIPID_BCM5700_A0,
2002 BGE_QUIRK_LINK_STATE_BROKEN,
2003 "BCM5700 A0" },
2004
2005 { BGE_CHIPID_BCM5700_A1,
2006 BGE_QUIRK_LINK_STATE_BROKEN,
2007 "BCM5700 A1" },
2008
2009 { BGE_CHIPID_BCM5700_B0,
2010 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_CSUM_BROKEN|BGE_QUIRK_5700_COMMON,
2011 "BCM5700 B0" },
2012
2013 { BGE_CHIPID_BCM5700_B1,
2014 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
2015 "BCM5700 B1" },
2016
2017 { BGE_CHIPID_BCM5700_B2,
2018 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
2019 "BCM5700 B2" },
2020
2021 { BGE_CHIPID_BCM5700_B3,
2022 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
2023 "BCM5700 B3" },
2024
2025 /* This is treated like a BCM5700 Bx */
2026 { BGE_CHIPID_BCM5700_ALTIMA,
2027 BGE_QUIRK_LINK_STATE_BROKEN|BGE_QUIRK_5700_COMMON,
2028 "BCM5700 Altima" },
2029
2030 { BGE_CHIPID_BCM5700_C0,
2031 0,
2032 "BCM5700 C0" },
2033
2034 { BGE_CHIPID_BCM5701_A0,
2035 0, /*XXX really, just not known */
2036 "BCM5701 A0" },
2037
2038 { BGE_CHIPID_BCM5701_B0,
2039 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
2040 "BCM5701 B0" },
2041
2042 { BGE_CHIPID_BCM5701_B2,
2043 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
2044 "BCM5701 B2" },
2045
2046 { BGE_CHIPID_BCM5701_B5,
2047 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
2048 "BCM5701 B5" },
2049
2050 { BGE_CHIPID_BCM5703_A0,
2051 0,
2052 "BCM5703 A0" },
2053
2054 { BGE_CHIPID_BCM5703_A1,
2055 0,
2056 "BCM5703 A1" },
2057
2058 { BGE_CHIPID_BCM5703_A2,
2059 BGE_QUIRK_ONLY_PHY_1,
2060 "BCM5703 A2" },
2061
2062 { BGE_CHIPID_BCM5703_A3,
2063 BGE_QUIRK_ONLY_PHY_1,
2064 "BCM5703 A3" },
2065
2066 { BGE_CHIPID_BCM5703_B0,
2067 BGE_QUIRK_ONLY_PHY_1,
2068 "BCM5703 B0" },
2069
2070 { BGE_CHIPID_BCM5704_A0,
2071 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
2072 "BCM5704 A0" },
2073
2074 { BGE_CHIPID_BCM5704_A1,
2075 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
2076 "BCM5704 A1" },
2077
2078 { BGE_CHIPID_BCM5704_A2,
2079 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
2080 "BCM5704 A2" },
2081
2082 { BGE_CHIPID_BCM5704_A3,
2083 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_FEWER_MBUFS,
2084 "BCM5704 A3" },
2085
2086 { BGE_CHIPID_BCM5705_A0,
2087 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2088 "BCM5705 A0" },
2089
2090 { BGE_CHIPID_BCM5705_A1,
2091 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2092 "BCM5705 A1" },
2093
2094 { BGE_CHIPID_BCM5705_A2,
2095 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2096 "BCM5705 A2" },
2097
2098 { BGE_CHIPID_BCM5705_A3,
2099 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2100 "BCM5705 A3" },
2101
2102 { BGE_CHIPID_BCM5750_A0,
2103 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2104 "BCM5750 A0" },
2105
2106 { BGE_CHIPID_BCM5750_A1,
2107 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2108 "BCM5750 A1" },
2109
2110 { BGE_CHIPID_BCM5751_A1,
2111 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2112 "BCM5751 A1" },
2113
2114 { BGE_CHIPID_BCM5752_A0,
2115 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2116 "BCM5752 A0" },
2117
2118 { BGE_CHIPID_BCM5752_A1,
2119 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2120 "BCM5752 A1" },
2121
2122 { BGE_CHIPID_BCM5752_A2,
2123 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2124 "BCM5752 A2" },
2125
2126 { BGE_CHIPID_BCM5755_A0,
2127 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2128 "BCM5755 A0" },
2129
2130 { BGE_CHIPID_BCM5755_A1,
2131 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2132 "BCM5755 A1" },
2133
2134 { BGE_CHIPID_BCM5755_A2,
2135 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2136 "BCM5755 A2" },
2137
2138 { BGE_CHIPID_BCM5755_C0,
2139 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2140 "BCM5755 C0" },
2141
2142 { BGE_CHIPID_BCM5787_A0,
2143 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2144 "BCM5754/5787 A0" },
2145
2146 { BGE_CHIPID_BCM5787_A1,
2147 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2148 "BCM5754/5787 A1" },
2149
2150 { BGE_CHIPID_BCM5787_A2,
2151 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2152 "BCM5754/5787 A2" },
2153
2154 { 0, 0, NULL }
2155 };
2156
2157 /*
2158 * Some defaults for major revisions, so that newer steppings
2159 * that we don't know about have a shot at working.
2160 */
2161 static const struct bge_revision bge_majorrevs[] = {
2162 { BGE_ASICREV_BCM5700,
2163 BGE_QUIRK_LINK_STATE_BROKEN,
2164 "unknown BCM5700" },
2165
2166 { BGE_ASICREV_BCM5701,
2167 BGE_QUIRK_PCIX_DMA_ALIGN_BUG,
2168 "unknown BCM5701" },
2169
2170 { BGE_ASICREV_BCM5703,
2171 0,
2172 "unknown BCM5703" },
2173
2174 { BGE_ASICREV_BCM5704,
2175 BGE_QUIRK_ONLY_PHY_1,
2176 "unknown BCM5704" },
2177
2178 { BGE_ASICREV_BCM5705,
2179 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2180 "unknown BCM5705" },
2181
2182 { BGE_ASICREV_BCM5750,
2183 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2184 "unknown BCM575x family" },
2185
2186 { BGE_ASICREV_BCM5714_A0,
2187 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2188 "unknown BCM5714" },
2189
2190 { BGE_ASICREV_BCM5714,
2191 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2192 "unknown BCM5714" },
2193
2194 { BGE_ASICREV_BCM5752,
2195 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2196 "unknown BCM5752 family" },
2197
2198 { BGE_ASICREV_BCM5755,
2199 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2200 "unknown BCM5755" },
2201
2202 { BGE_ASICREV_BCM5780,
2203 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2204 "unknown BCM5780" },
2205
2206 { BGE_ASICREV_BCM5787,
2207 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2208 "unknown BCM5787" },
2209
2210 { BGE_ASICREV_BCM5906,
2211 BGE_QUIRK_ONLY_PHY_1|BGE_QUIRK_5705_CORE,
2212 "unknown BCM5906" },
2213
2214 { 0,
2215 0,
2216 NULL }
2217 };
2218
2219
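/*
 * Look up the name and quirks for a chip id: try an exact match on the
 * full stepping in bge_revisions first, then fall back to the per-ASIC
 * major-revision defaults in bge_majorrevs above.
 */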
2220 static const struct bge_revision *
2221 bge_lookup_rev(uint32_t chipid)
2222 {
2223 const struct bge_revision *br;
2224
2225 for (br = bge_revisions; br->br_name != NULL; br++) {
2226 if (br->br_chipid == chipid)
2227 return (br);
2228 }
2229
2230 for (br = bge_majorrevs; br->br_name != NULL; br++) {
2231 if (br->br_chipid == BGE_ASICREV(chipid))
2232 return (br);
2233 }
2234
2235 return (NULL);
2236 }
2237
2238 static const struct bge_product {
2239 pci_vendor_id_t bp_vendor;
2240 pci_product_id_t bp_product;
2241 const char *bp_name;
2242 } bge_products[] = {
2243 /*
2244 * The BCM5700 documentation seems to indicate that the hardware
2245 * still has the Alteon vendor ID burned into it, though it
2246 * should always be overridden by the value in the EEPROM. We'll
2247 * check for it anyway.
2248 */
2249 { PCI_VENDOR_ALTEON,
2250 PCI_PRODUCT_ALTEON_BCM5700,
2251 "Broadcom BCM5700 Gigabit Ethernet",
2252 },
2253 { PCI_VENDOR_ALTEON,
2254 PCI_PRODUCT_ALTEON_BCM5701,
2255 "Broadcom BCM5701 Gigabit Ethernet",
2256 },
2257
2258 { PCI_VENDOR_ALTIMA,
2259 PCI_PRODUCT_ALTIMA_AC1000,
2260 "Altima AC1000 Gigabit Ethernet",
2261 },
2262 { PCI_VENDOR_ALTIMA,
2263 PCI_PRODUCT_ALTIMA_AC1001,
2264 "Altima AC1001 Gigabit Ethernet",
2265 },
2266 { PCI_VENDOR_ALTIMA,
2267 PCI_PRODUCT_ALTIMA_AC9100,
2268 "Altima AC9100 Gigabit Ethernet",
2269 },
2270
2271 { PCI_VENDOR_BROADCOM,
2272 PCI_PRODUCT_BROADCOM_BCM5700,
2273 "Broadcom BCM5700 Gigabit Ethernet",
2274 },
2275 { PCI_VENDOR_BROADCOM,
2276 PCI_PRODUCT_BROADCOM_BCM5701,
2277 "Broadcom BCM5701 Gigabit Ethernet",
2278 },
2279 { PCI_VENDOR_BROADCOM,
2280 PCI_PRODUCT_BROADCOM_BCM5702,
2281 "Broadcom BCM5702 Gigabit Ethernet",
2282 },
2283 { PCI_VENDOR_BROADCOM,
2284 PCI_PRODUCT_BROADCOM_BCM5702X,
2285 "Broadcom BCM5702X Gigabit Ethernet" },
2286
2287 { PCI_VENDOR_BROADCOM,
2288 PCI_PRODUCT_BROADCOM_BCM5703,
2289 "Broadcom BCM5703 Gigabit Ethernet",
2290 },
2291 { PCI_VENDOR_BROADCOM,
2292 PCI_PRODUCT_BROADCOM_BCM5703X,
2293 "Broadcom BCM5703X Gigabit Ethernet",
2294 },
2295 { PCI_VENDOR_BROADCOM,
2296 PCI_PRODUCT_BROADCOM_BCM5703_ALT,
2297 "Broadcom BCM5703 Gigabit Ethernet",
2298 },
2299
2300 { PCI_VENDOR_BROADCOM,
2301 PCI_PRODUCT_BROADCOM_BCM5704C,
2302 "Broadcom BCM5704C Dual Gigabit Ethernet",
2303 },
2304 { PCI_VENDOR_BROADCOM,
2305 PCI_PRODUCT_BROADCOM_BCM5704S,
2306 "Broadcom BCM5704S Dual Gigabit Ethernet",
2307 },
2308
2309 { PCI_VENDOR_BROADCOM,
2310 PCI_PRODUCT_BROADCOM_BCM5705,
2311 "Broadcom BCM5705 Gigabit Ethernet",
2312 },
2313 { PCI_VENDOR_BROADCOM,
2314 PCI_PRODUCT_BROADCOM_BCM5705K,
2315 "Broadcom BCM5705K Gigabit Ethernet",
2316 },
2317 { PCI_VENDOR_BROADCOM,
2318 PCI_PRODUCT_BROADCOM_BCM5705M,
2319 "Broadcom BCM5705M Gigabit Ethernet",
2320 },
2321 { PCI_VENDOR_BROADCOM,
2322 PCI_PRODUCT_BROADCOM_BCM5705M_ALT,
2323 "Broadcom BCM5705M Gigabit Ethernet",
2324 },
2325
2326 { PCI_VENDOR_BROADCOM,
2327 PCI_PRODUCT_BROADCOM_BCM5714,
2328 "Broadcom BCM5714/5715 Gigabit Ethernet",
2329 },
2330 { PCI_VENDOR_BROADCOM,
2331 PCI_PRODUCT_BROADCOM_BCM5715,
2332 "Broadcom BCM5714/5715 Gigabit Ethernet",
2333 },
2334 { PCI_VENDOR_BROADCOM,
2335 PCI_PRODUCT_BROADCOM_BCM5789,
2336 "Broadcom BCM5789 Gigabit Ethernet",
2337 },
2338
2339 { PCI_VENDOR_BROADCOM,
2340 PCI_PRODUCT_BROADCOM_BCM5721,
2341 "Broadcom BCM5721 Gigabit Ethernet",
2342 },
2343
2344 { PCI_VENDOR_BROADCOM,
2345 PCI_PRODUCT_BROADCOM_BCM5722,
2346 "Broadcom BCM5722 Gigabit Ethernet",
2347 },
2348
2349 { PCI_VENDOR_BROADCOM,
2350 PCI_PRODUCT_BROADCOM_BCM5750,
2351 "Broadcom BCM5750 Gigabit Ethernet",
2352 },
2353
2354 { PCI_VENDOR_BROADCOM,
2355 PCI_PRODUCT_BROADCOM_BCM5750M,
2356 "Broadcom BCM5750M Gigabit Ethernet",
2357 },
2358
2359 { PCI_VENDOR_BROADCOM,
2360 PCI_PRODUCT_BROADCOM_BCM5751,
2361 "Broadcom BCM5751 Gigabit Ethernet",
2362 },
2363
2364 { PCI_VENDOR_BROADCOM,
2365 PCI_PRODUCT_BROADCOM_BCM5751M,
2366 "Broadcom BCM5751M Gigabit Ethernet",
2367 },
2368
2369 { PCI_VENDOR_BROADCOM,
2370 PCI_PRODUCT_BROADCOM_BCM5752,
2371 "Broadcom BCM5752 Gigabit Ethernet",
2372 },
2373
2374 { PCI_VENDOR_BROADCOM,
2375 PCI_PRODUCT_BROADCOM_BCM5752M,
2376 "Broadcom BCM5752M Gigabit Ethernet",
2377 },
2378
2379 { PCI_VENDOR_BROADCOM,
2380 PCI_PRODUCT_BROADCOM_BCM5753,
2381 "Broadcom BCM5753 Gigabit Ethernet",
2382 },
2383
2384 { PCI_VENDOR_BROADCOM,
2385 PCI_PRODUCT_BROADCOM_BCM5753M,
2386 "Broadcom BCM5753M Gigabit Ethernet",
2387 },
2388
2389 { PCI_VENDOR_BROADCOM,
2390 PCI_PRODUCT_BROADCOM_BCM5754,
2391 "Broadcom BCM5754 Gigabit Ethernet",
2392 },
2393
2394 { PCI_VENDOR_BROADCOM,
2395 PCI_PRODUCT_BROADCOM_BCM5754M,
2396 "Broadcom BCM5754M Gigabit Ethernet",
2397 },
2398
2399 { PCI_VENDOR_BROADCOM,
2400 PCI_PRODUCT_BROADCOM_BCM5755,
2401 "Broadcom BCM5755 Gigabit Ethernet",
2402 },
2403
2404 { PCI_VENDOR_BROADCOM,
2405 PCI_PRODUCT_BROADCOM_BCM5755M,
2406 "Broadcom BCM5755M Gigabit Ethernet",
2407 },
2408
2409 { PCI_VENDOR_BROADCOM,
2410 PCI_PRODUCT_BROADCOM_BCM5780,
2411 "Broadcom BCM5780 Gigabit Ethernet",
2412 },
2413
2414 { PCI_VENDOR_BROADCOM,
2415 PCI_PRODUCT_BROADCOM_BCM5780S,
2416 "Broadcom BCM5780S Gigabit Ethernet",
2417 },
2418
2419 { PCI_VENDOR_BROADCOM,
2420 PCI_PRODUCT_BROADCOM_BCM5782,
2421 "Broadcom BCM5782 Gigabit Ethernet",
2422 },
2423
2424 { PCI_VENDOR_BROADCOM,
2425 PCI_PRODUCT_BROADCOM_BCM5786,
2426 "Broadcom BCM5786 Gigabit Ethernet",
2427 },
2428
2429 { PCI_VENDOR_BROADCOM,
2430 PCI_PRODUCT_BROADCOM_BCM5787,
2431 "Broadcom BCM5787 Gigabit Ethernet",
2432 },
2433
2434 { PCI_VENDOR_BROADCOM,
2435 PCI_PRODUCT_BROADCOM_BCM5787M,
2436 "Broadcom BCM5787M Gigabit Ethernet",
2437 },
2438
2439 { PCI_VENDOR_BROADCOM,
2440 PCI_PRODUCT_BROADCOM_BCM5788,
2441 "Broadcom BCM5788 Gigabit Ethernet",
2442 },
2443 { PCI_VENDOR_BROADCOM,
2444 PCI_PRODUCT_BROADCOM_BCM5789,
2445 "Broadcom BCM5789 Gigabit Ethernet",
2446 },
2447
2448 { PCI_VENDOR_BROADCOM,
2449 PCI_PRODUCT_BROADCOM_BCM5901,
2450 "Broadcom BCM5901 Fast Ethernet",
2451 },
2452 { PCI_VENDOR_BROADCOM,
2453 PCI_PRODUCT_BROADCOM_BCM5901A2,
2454 "Broadcom BCM5901A2 Fast Ethernet",
2455 },
2456
2457 { PCI_VENDOR_SCHNEIDERKOCH,
2458 PCI_PRODUCT_SCHNEIDERKOCH_SK_9DX1,
2459 "SysKonnect SK-9Dx1 Gigabit Ethernet",
2460 },
2461
2462 { PCI_VENDOR_3COM,
2463 PCI_PRODUCT_3COM_3C996,
2464 "3Com 3c996 Gigabit Ethernet",
2465 },
2466
2467 { PCI_VENDOR_BROADCOM,
2468 PCI_PRODUCT_BROADCOM_BCM5906,
2469 "Broadcom BCM5906 Fast Ethernet",
2470 },
2471
2472 { PCI_VENDOR_BROADCOM,
2473 PCI_PRODUCT_BROADCOM_BCM5906M,
2474 "Broadcom BCM5906M Fast Ethernet",
2475 },
2476
2477 { 0,
2478 0,
2479 NULL },
2480 };
2481
2482 static const struct bge_product *
2483 bge_lookup(const struct pci_attach_args *pa)
2484 {
2485 const struct bge_product *bp;
2486
2487 for (bp = bge_products; bp->bp_name != NULL; bp++) {
2488 if (PCI_VENDOR(pa->pa_id) == bp->bp_vendor &&
2489 PCI_PRODUCT(pa->pa_id) == bp->bp_product)
2490 return (bp);
2491 }
2492
2493 return (NULL);
2494 }
2495
2496 static int
2497 bge_setpowerstate(struct bge_softc *sc, int powerlevel)
2498 {
2499 #ifdef NOTYET
2500 u_int32_t pm_ctl = 0;
2501
2502 /* XXX FIXME: make sure indirect accesses enabled? */
2503 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_MISC_CTL, 4);
2504 pm_ctl |= BGE_PCIMISCCTL_INDIRECT_ACCESS;
2505 pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, pm_ctl, 4);
2506
2507 /* clear the PME_assert bit and power state bits, enable PME */
2508 pm_ctl = pci_conf_read(sc->bge_dev, BGE_PCI_PWRMGMT_CMD, 2);
2509 pm_ctl &= ~PCIM_PSTAT_DMASK;
2510 pm_ctl |= (1 << 8);
2511
2512 if (powerlevel == 0) {
2513 pm_ctl |= PCIM_PSTAT_D0;
2514 pci_write_config(sc->bge_dev, BGE_PCI_PWRMGMT_CMD,
2515 pm_ctl, 2);
2516 DELAY(10000);
2517 CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, sc->bge_local_ctrl_reg);
2518 DELAY(10000);
2519
2520 #ifdef NOTYET
2521 /* XXX FIXME: write 0x02 to phy aux_Ctrl reg */
2522 bge_miibus_writereg(sc->bge_dev, 1, 0x18, 0x02);
2523 #endif
2524 DELAY(40); DELAY(40); DELAY(40);
2525 DELAY(10000); /* above not quite adequate on 5700 */
2526 return 0;
2527 }
2528
2529
2530 /*
2531 * Entering ACPI power states D1-D3 is achieved by wiggling
2532 * GMII gpio pins. Example code assumes all hardware vendors
2533 	 * followed Broadcom's sample PCB layout. Until we verify that
2534 * for all supported OEM cards, states D1-D3 are unsupported.
2535 */
2536 aprint_error_dev(sc->bge_dev,
2537 "power state %d unimplemented; check GPIO pins\n",
2538 powerlevel);
2539 #endif
2540 return EOPNOTSUPP;
2541 }
2542
2543
2544 /*
2545 * Probe for a Broadcom chip. Check the PCI vendor and device IDs
2546 * against our list and return its name if we find a match. Note
2547 * that since the Broadcom controller contains VPD support, we
2548 * can get the device name string from the controller itself instead
2549 * of the compiled-in string. This is a little slow, but it guarantees
2550 * we'll always announce the right product name.
2551 */
2552 static int
2553 bge_probe(device_t parent, cfdata_t match, void *aux)
2554 {
2555 struct pci_attach_args *pa = (struct pci_attach_args *)aux;
2556
2557 if (bge_lookup(pa) != NULL)
2558 return (1);
2559
2560 return (0);
2561 }
2562
2563 static void
2564 bge_attach(device_t parent, device_t self, void *aux)
2565 {
2566 struct bge_softc *sc = device_private(self);
2567 struct pci_attach_args *pa = aux;
2568 const struct bge_product *bp;
2569 const struct bge_revision *br;
2570 pci_chipset_tag_t pc;
2571 pci_intr_handle_t ih;
2572 const char *intrstr = NULL;
2573 bus_dma_segment_t seg;
2574 int rseg;
2575 u_int32_t hwcfg = 0;
2576 u_int32_t command;
2577 struct ifnet *ifp;
2578 void * kva;
2579 u_char eaddr[ETHER_ADDR_LEN];
2580 pcireg_t memtype;
2581 bus_addr_t memaddr;
2582 bus_size_t memsize;
2583 u_int32_t pm_ctl;
2584
2585 bp = bge_lookup(pa);
2586 KASSERT(bp != NULL);
2587
2588 sc->sc_pc = pa->pa_pc;
2589 sc->sc_pcitag = pa->pa_tag;
2590 sc->bge_dev = self;
2591
2592 aprint_naive(": Ethernet controller\n");
2593 aprint_normal(": %s\n", bp->bp_name);
2594
2595 /*
2596 * Map control/status registers.
2597 */
2598 DPRINTFN(5, ("Map control/status regs\n"));
2599 pc = sc->sc_pc;
2600 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2601 command |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
2602 pci_conf_write(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, command);
2603 command = pci_conf_read(pc, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
2604
2605 if (!(command & PCI_COMMAND_MEM_ENABLE)) {
2606 aprint_error_dev(sc->bge_dev,
2607 "failed to enable memory mapping!\n");
2608 return;
2609 }
2610
2611 DPRINTFN(5, ("pci_mem_find\n"));
2612 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_pcitag, BGE_PCI_BAR0);
2613 switch (memtype) {
2614 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
2615 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
2616 if (pci_mapreg_map(pa, BGE_PCI_BAR0,
2617 memtype, 0, &sc->bge_btag, &sc->bge_bhandle,
2618 &memaddr, &memsize) == 0)
2619 break;
2620 default:
2621 aprint_error_dev(sc->bge_dev, "can't find mem space\n");
2622 return;
2623 }
2624
2625 DPRINTFN(5, ("pci_intr_map\n"));
2626 if (pci_intr_map(pa, &ih)) {
2627 aprint_error_dev(sc->bge_dev, "couldn't map interrupt\n");
2628 return;
2629 }
2630
2631 DPRINTFN(5, ("pci_intr_string\n"));
2632 intrstr = pci_intr_string(pc, ih);
2633
2634 DPRINTFN(5, ("pci_intr_establish\n"));
2635 sc->bge_intrhand = pci_intr_establish(pc, ih, IPL_NET, bge_intr, sc);
2636
2637 if (sc->bge_intrhand == NULL) {
2638 aprint_error_dev(sc->bge_dev,
2639 "couldn't establish interrupt%s%s\n",
2640 intrstr ? " at " : "", intrstr ? intrstr : "");
2641 return;
2642 }
2643 aprint_normal_dev(sc->bge_dev, "interrupting at %s\n", intrstr);
2644
2645 /*
2646 * Kludge for 5700 Bx bug: a hardware bug (PCIX byte enable?)
2647 * can clobber the chip's PCI config-space power control registers,
2648 * leaving the card in D3 powersave state.
2649 * We do not have memory-mapped registers in this state,
2650 * so force device into D0 state before starting initialization.
2651 */
2652 pm_ctl = pci_conf_read(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD);
2653 pm_ctl &= ~(PCI_PWR_D0|PCI_PWR_D1|PCI_PWR_D2|PCI_PWR_D3);
2654 pm_ctl |= (1 << 8) | PCI_PWR_D0 ; /* D0 state */
2655 pci_conf_write(pc, sc->sc_pcitag, BGE_PCI_PWRMGMT_CMD, pm_ctl);
2656 	DELAY(1000);	/* 27 usec is allegedly sufficient */
2657
2658 /*
2659 * Save ASIC rev. Look up any quirks associated with this
2660 * ASIC.
2661 */
2662 sc->bge_chipid =
2663 pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL) &
2664 BGE_PCIMISCCTL_ASICREV;
2665
2666 /*
2667 * Detect PCI-Express devices
2668 * XXX: guessed from Linux/FreeBSD; no documentation
2669 */
2670 if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PCIEXPRESS,
2671 NULL, NULL) != 0)
2672 sc->bge_pcie = 1;
2673 else
2674 sc->bge_pcie = 0;
2675
2676 /* Try to reset the chip. */
2677 DPRINTFN(5, ("bge_reset\n"));
2678 bge_reset(sc);
2679
2680 if (bge_chipinit(sc)) {
2681 aprint_error_dev(sc->bge_dev, "chip initialization failed\n");
2682 bge_release_resources(sc);
2683 return;
2684 }
2685
2686 /*
2687 * Get station address from the EEPROM.
2688 */
2689 if (bge_get_eaddr(sc, eaddr)) {
2690 aprint_error_dev(sc->bge_dev,
2691 		    "failed to read station address\n");
2692 bge_release_resources(sc);
2693 return;
2694 }
2695
2696 br = bge_lookup_rev(sc->bge_chipid);
2697
2698 if (br == NULL) {
2699 aprint_normal_dev(sc->bge_dev, "unknown ASIC (0x%04x)",
2700 sc->bge_chipid >> 16);
2701 sc->bge_quirks = 0;
2702 } else {
2703 aprint_normal_dev(sc->bge_dev, "ASIC %s (0x%04x)",
2704 br->br_name, sc->bge_chipid >> 16);
2705 sc->bge_quirks |= br->br_quirks;
2706 }
2707 aprint_normal(", Ethernet address %s\n", ether_sprintf(eaddr));
2708
2709 /* Allocate the general information block and ring buffers. */
2710 if (pci_dma64_available(pa))
2711 sc->bge_dmatag = pa->pa_dmat64;
2712 else
2713 sc->bge_dmatag = pa->pa_dmat;
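	/*
	 * The rings, status block and statistics block all live in a single
	 * bus_dma(9) allocation (struct bge_ring_data): allocate the
	 * DMA-safe memory, map it into kernel VA, create a DMA map and load
	 * it, unwinding each step if a later one fails.
	 */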
2714 DPRINTFN(5, ("bus_dmamem_alloc\n"));
2715 if (bus_dmamem_alloc(sc->bge_dmatag, sizeof(struct bge_ring_data),
2716 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
2717 aprint_error_dev(sc->bge_dev, "can't alloc rx buffers\n");
2718 return;
2719 }
2720 DPRINTFN(5, ("bus_dmamem_map\n"));
2721 if (bus_dmamem_map(sc->bge_dmatag, &seg, rseg,
2722 sizeof(struct bge_ring_data), &kva,
2723 BUS_DMA_NOWAIT)) {
2724 aprint_error_dev(sc->bge_dev,
2725 "can't map DMA buffers (%zu bytes)\n",
2726 sizeof(struct bge_ring_data));
2727 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2728 return;
2729 }
2730 DPRINTFN(5, ("bus_dmamem_create\n"));
2731 if (bus_dmamap_create(sc->bge_dmatag, sizeof(struct bge_ring_data), 1,
2732 sizeof(struct bge_ring_data), 0,
2733 BUS_DMA_NOWAIT, &sc->bge_ring_map)) {
2734 aprint_error_dev(sc->bge_dev, "can't create DMA map\n");
2735 bus_dmamem_unmap(sc->bge_dmatag, kva,
2736 sizeof(struct bge_ring_data));
2737 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2738 return;
2739 }
2740 DPRINTFN(5, ("bus_dmamem_load\n"));
2741 if (bus_dmamap_load(sc->bge_dmatag, sc->bge_ring_map, kva,
2742 sizeof(struct bge_ring_data), NULL,
2743 BUS_DMA_NOWAIT)) {
2744 bus_dmamap_destroy(sc->bge_dmatag, sc->bge_ring_map);
2745 bus_dmamem_unmap(sc->bge_dmatag, kva,
2746 sizeof(struct bge_ring_data));
2747 bus_dmamem_free(sc->bge_dmatag, &seg, rseg);
2748 return;
2749 }
2750
2751 DPRINTFN(5, ("bzero\n"));
2752 sc->bge_rdata = (struct bge_ring_data *)kva;
2753
2754 memset(sc->bge_rdata, 0, sizeof(struct bge_ring_data));
2755
2756 /* Try to allocate memory for jumbo buffers. */
2757 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
2758 if (bge_alloc_jumbo_mem(sc)) {
2759 aprint_error_dev(sc->bge_dev,
2760 "jumbo buffer allocation failed\n");
2761 } else
2762 sc->ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2763 }
2764
2765 /* Set default tuneable values. */
2766 sc->bge_stat_ticks = BGE_TICKS_PER_SEC;
2767 sc->bge_rx_coal_ticks = 150;
2768 sc->bge_rx_max_coal_bds = 64;
2769 #ifdef ORIG_WPAUL_VALUES
2770 sc->bge_tx_coal_ticks = 150;
2771 sc->bge_tx_max_coal_bds = 128;
2772 #else
2773 sc->bge_tx_coal_ticks = 300;
2774 sc->bge_tx_max_coal_bds = 400;
2775 #endif
2776 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
2777 sc->bge_tx_coal_ticks = (12 * 5);
2778 sc->bge_tx_max_coal_bds = (12 * 5);
2779 aprint_verbose_dev(sc->bge_dev,
2780 "setting short Tx thresholds\n");
2781 }
2782
2783 /* Set up ifnet structure */
2784 ifp = &sc->ethercom.ec_if;
2785 ifp->if_softc = sc;
2786 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2787 ifp->if_ioctl = bge_ioctl;
2788 ifp->if_stop = bge_stop;
2789 ifp->if_start = bge_start;
2790 ifp->if_init = bge_init;
2791 ifp->if_watchdog = bge_watchdog;
2792 IFQ_SET_MAXLEN(&ifp->if_snd, max(BGE_TX_RING_CNT - 1, IFQ_MAXLEN));
2793 IFQ_SET_READY(&ifp->if_snd);
2794 DPRINTFN(5, ("strcpy if_xname\n"));
2795 strcpy(ifp->if_xname, device_xname(sc->bge_dev));
2796
2797 if ((sc->bge_quirks & BGE_QUIRK_CSUM_BROKEN) == 0)
2798 sc->ethercom.ec_if.if_capabilities |=
2799 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2800 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2801 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
2802 sc->ethercom.ec_capabilities |=
2803 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
2804
2805 if (sc->bge_pcie)
2806 sc->ethercom.ec_if.if_capabilities |= IFCAP_TSOv4;
2807
2808 /*
2809 * Do MII setup.
2810 */
2811 DPRINTFN(5, ("mii setup\n"));
2812 sc->bge_mii.mii_ifp = ifp;
2813 sc->bge_mii.mii_readreg = bge_miibus_readreg;
2814 sc->bge_mii.mii_writereg = bge_miibus_writereg;
2815 sc->bge_mii.mii_statchg = bge_miibus_statchg;
2816
2817 /*
2818 * Figure out what sort of media we have by checking the
2819 * hardware config word in the first 32k of NIC internal memory,
2820 * or fall back to the config word in the EEPROM. Note: on some BCM5700
2821 * cards, this value appears to be unset. If that's the
2822 * case, we have to rely on identifying the NIC by its PCI
2823 * subsystem ID, as we do below for the SysKonnect SK-9D41.
2824 */
2825 if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) {
2826 hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG);
2827 } else {
2828 bge_read_eeprom(sc, (void *)&hwcfg,
2829 BGE_EE_HWCFG_OFFSET, sizeof(hwcfg));
2830 hwcfg = be32toh(hwcfg);
2831 }
2832 if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER)
2833 sc->bge_tbi = 1;
2834
2835 /* The SysKonnect SK-9D41 is a 1000baseSX card. */
2836 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_SUBSYS) >> 16) ==
2837 SK_SUBSYSID_9D41)
2838 sc->bge_tbi = 1;
2839
2840 if (sc->bge_tbi) {
2841 ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
2842 bge_ifmedia_sts);
2843 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
2844 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
2845 0, NULL);
2846 ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
2847 ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO);
2848 } else {
2849 /*
2850 * Do transceiver setup.
2851 */
2852 ifmedia_init(&sc->bge_mii.mii_media, 0, bge_ifmedia_upd,
2853 bge_ifmedia_sts);
2854 mii_attach(sc->bge_dev, &sc->bge_mii, 0xffffffff,
2855 MII_PHY_ANY, MII_OFFSET_ANY,
2856 MIIF_FORCEANEG|MIIF_DOPAUSE);
2857
2858 if (LIST_EMPTY(&sc->bge_mii.mii_phys)) {
2859 aprint_error_dev(sc->bge_dev, "no PHY found!\n");
2860 ifmedia_add(&sc->bge_mii.mii_media,
2861 IFM_ETHER|IFM_MANUAL, 0, NULL);
2862 ifmedia_set(&sc->bge_mii.mii_media,
2863 IFM_ETHER|IFM_MANUAL);
2864 } else
2865 ifmedia_set(&sc->bge_mii.mii_media,
2866 IFM_ETHER|IFM_AUTO);
2867 }
2868
2869 /*
2870 * When using the BCM5701 in PCI-X mode, data corruption has
2871 * been observed in the first few bytes of some received packets.
2872 * Aligning the packet buffer in memory eliminates the corruption.
2873 * Unfortunately, this misaligns the packet payloads. On platforms
2874 * which do not support unaligned accesses, we will realign the
2875 * payloads by copying the received packets.
2876 */
2877 if (sc->bge_quirks & BGE_QUIRK_PCIX_DMA_ALIGN_BUG) {
2878 /* If in PCI-X mode, work around the alignment bug. */
2879 if ((pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE) &
2880 (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) ==
2881 BGE_PCISTATE_PCI_BUSSPEED)
2882 sc->bge_rx_alignment_bug = 1;
2883 }
2884
2885 /*
2886 * Call MI attach routine.
2887 */
2888 DPRINTFN(5, ("if_attach\n"));
2889 if_attach(ifp);
2890 DPRINTFN(5, ("ether_ifattach\n"));
2891 ether_ifattach(ifp, eaddr);
2892 #if NRND > 0
2893 rnd_attach_source(&sc->rnd_source, device_xname(sc->bge_dev),
2894 RND_TYPE_NET, 0);
2895 #endif
2896 #ifdef BGE_EVENT_COUNTERS
2897 /*
2898 * Attach event counters.
2899 */
2900 evcnt_attach_dynamic(&sc->bge_ev_intr, EVCNT_TYPE_INTR,
2901 NULL, device_xname(sc->bge_dev), "intr");
2902 evcnt_attach_dynamic(&sc->bge_ev_tx_xoff, EVCNT_TYPE_MISC,
2903 NULL, device_xname(sc->bge_dev), "tx_xoff");
2904 evcnt_attach_dynamic(&sc->bge_ev_tx_xon, EVCNT_TYPE_MISC,
2905 NULL, device_xname(sc->bge_dev), "tx_xon");
2906 evcnt_attach_dynamic(&sc->bge_ev_rx_xoff, EVCNT_TYPE_MISC,
2907 NULL, device_xname(sc->bge_dev), "rx_xoff");
2908 evcnt_attach_dynamic(&sc->bge_ev_rx_xon, EVCNT_TYPE_MISC,
2909 NULL, device_xname(sc->bge_dev), "rx_xon");
2910 evcnt_attach_dynamic(&sc->bge_ev_rx_macctl, EVCNT_TYPE_MISC,
2911 NULL, device_xname(sc->bge_dev), "rx_macctl");
2912 evcnt_attach_dynamic(&sc->bge_ev_xoffentered, EVCNT_TYPE_MISC,
2913 NULL, device_xname(sc->bge_dev), "xoffentered");
2914 #endif /* BGE_EVENT_COUNTERS */
2915 DPRINTFN(5, ("callout_init\n"));
2916 callout_init(&sc->bge_timeout, 0);
2917
2918 if (!pmf_device_register(self, NULL, NULL))
2919 aprint_error_dev(self, "couldn't establish power handler\n");
2920 else
2921 pmf_class_network_register(self, ifp);
2922 }
2923
2924 static void
2925 bge_release_resources(struct bge_softc *sc)
2926 {
2927 if (sc->bge_vpd_prodname != NULL)
2928 free(sc->bge_vpd_prodname, M_DEVBUF);
2929
2930 if (sc->bge_vpd_readonly != NULL)
2931 free(sc->bge_vpd_readonly, M_DEVBUF);
2932 }
2933
2934 static void
2935 bge_reset(struct bge_softc *sc)
2936 {
2937 u_int32_t cachesize, command, pcistate, new_pcistate;
2938 int i, val;
2939 void (*write_op)(struct bge_softc *, int, int);
2940
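	/*
	 * Select how the global reset will be issued: 5750-and-later chips
	 * (other than the 5714 family and the 5906) use the memory-write
	 * routines, direct for PCI-Express parts and indirect otherwise;
	 * everything else uses indirect register writes.
	 */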
2941 if (BGE_IS_5750_OR_BEYOND(sc) && !BGE_IS_5714_FAMILY(sc) &&
2942 (BGE_ASICREV(sc->bge_chipid) != BGE_ASICREV_BCM5906)) {
2943 if (sc->bge_pcie) {
2944 write_op = bge_writemem_direct;
2945 } else {
2946 write_op = bge_writemem_ind;
2947 }
2948 } else {
2949 write_op = bge_writereg_ind;
2950 }
2951
2952
2953 /* Save some important PCI state. */
2954 cachesize = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ);
2955 command = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD);
2956 pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE);
2957
2958 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
2959 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
2960 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
2961
2962 /*
2963 * Disable the firmware fastboot feature on 5752 ASIC
2964 * to avoid firmware timeout.
2965 */
2966 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5752 ||
2967 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5755 ||
2968 BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5787)
2969 CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0);
2970
2971 val = BGE_MISCCFG_RESET_CORE_CLOCKS | (65<<1);
2972 /*
2973 * XXX: from FreeBSD/Linux; no documentation
2974 */
2975 if (sc->bge_pcie) {
2976 if (CSR_READ_4(sc, BGE_PCIE_CTL1) == 0x60)
2977 CSR_WRITE_4(sc, BGE_PCIE_CTL1, 0x20);
2978 if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
2979 /* No idea what that actually means */
2980 CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29);
2981 val |= (1<<29);
2982 }
2983 }
2984
2985 /* Issue global reset */
2986 write_op(sc, BGE_MISC_CFG, val);
2987
2988 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
2989 i = CSR_READ_4(sc, BGE_VCPU_STATUS);
2990 CSR_WRITE_4(sc, BGE_VCPU_STATUS,
2991 i | BGE_VCPU_STATUS_DRV_RESET);
2992 i = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL);
2993 CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL,
2994 i & ~BGE_VCPU_EXT_CTRL_HALT_CPU);
2995 }
2996
2997
2998
2999 DELAY(1000);
3000
3001 /*
3002 * XXX: from FreeBSD/Linux; no documentation
3003 */
3004 if (sc->bge_pcie) {
3005 if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
3006 pcireg_t reg;
3007
3008 DELAY(500000);
3009 /* XXX: Magic Numbers */
3010 reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0);
3011 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_UNKNOWN0,
3012 reg | (1 << 15));
3013 }
3014 /*
3015 * XXX: Magic Numbers.
3016 * Sets maximal PCI-e payload and clears any PCI-e errors.
3017 * Should be replaced with references to PCI config-space
3018 * capability block for PCI-Express.
3019 */
3020 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3021 BGE_PCI_CONF_DEV_CTRL, 0xf5000);
3022
3023 }
3024
3025 /* Reset some of the PCI state that got zapped by reset */
3026 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_MISC_CTL,
3027 BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
3028 BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_PCISTATE_RW);
3029 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CMD, command);
3030 pci_conf_write(sc->sc_pc, sc->sc_pcitag, BGE_PCI_CACHESZ, cachesize);
3031 write_op(sc, BGE_MISC_CFG, (65 << 1));
3032
3033 /* Enable memory arbiter. */
3034 {
3035 uint32_t marbmode = 0;
3036 if (BGE_IS_5714_FAMILY(sc)) {
3037 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3038 }
3039 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3040 }
3041
3042
3043 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
3044 for (i = 0; i < BGE_TIMEOUT; i++) {
3045 val = CSR_READ_4(sc, BGE_VCPU_STATUS);
3046 if (val & BGE_VCPU_STATUS_INIT_DONE)
3047 break;
3048 DELAY(100);
3049 }
3050 if (i == BGE_TIMEOUT) {
3051 aprint_error_dev(sc->bge_dev, "reset timed out\n");
3052 return;
3053 }
3054 } else {
3055 /*
3056 * Write the magic number to the firmware mailbox at 0xb50
3057 * so that the driver can synchronize with the firmware.
3058 */
3059 bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
3060
3061 /*
3062 * Poll the value location we just wrote until
3063 * we see the 1's complement of the magic number.
3064 * This indicates that the firmware initialization
3065 * is complete.
3066 */
3067 for (i = 0; i < BGE_TIMEOUT; i++) {
3068 val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
3069 if (val == ~BGE_MAGIC_NUMBER)
3070 break;
3071 DELAY(1000);
3072 }
3073
3074 if (i >= BGE_TIMEOUT) {
3075 aprint_error_dev(sc->bge_dev,
3076 "firmware handshake timed out, val = %x\n", val);
3077 /*
3078 * XXX: occasionally fired on bcm5721, but without
3079 * apparent harm. For now, keep going if we timeout
3080 * against PCI-E devices.
3081 */
3082 if (!sc->bge_pcie)
3083 return;
3084 }
3085 }
3086
3087 /*
3088 * XXX Wait for the value of the PCISTATE register to
3089 * return to its original pre-reset state. This is a
3090 * fairly good indicator of reset completion. If we don't
3091 * wait for the reset to fully complete, trying to read
3092 * from the device's non-PCI registers may yield garbage
3093 * results.
3094 */
3095 for (i = 0; i < 10000; i++) {
3096 new_pcistate = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3097 BGE_PCI_PCISTATE);
3098 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) ==
3099 (pcistate & ~BGE_PCISTATE_RESERVED))
3100 break;
3101 DELAY(10);
3102 }
3103 if ((new_pcistate & ~BGE_PCISTATE_RESERVED) !=
3104 (pcistate & ~BGE_PCISTATE_RESERVED)) {
3105 aprint_error_dev(sc->bge_dev, "pcistate failed to revert\n");
3106 }
3107
3108 /* XXX: from FreeBSD/Linux; no documentation */
3109 if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0)
3110 CSR_WRITE_4(sc, BGE_PCIE_CTL0, CSR_READ_4(sc, BGE_PCIE_CTL0) | (1<<25));
3111
3112 /* Enable memory arbiter. */
3113 /* XXX why do this twice? */
3114 {
3115 uint32_t marbmode = 0;
3116 if (BGE_IS_5714_FAMILY(sc)) {
3117 marbmode = CSR_READ_4(sc, BGE_MARB_MODE);
3118 }
3119 CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | marbmode);
3120 }
3121
3122 /* Fix up byte swapping */
3123 CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS);
3124
3125 CSR_WRITE_4(sc, BGE_MAC_MODE, 0);
3126
3127 DELAY(10000);
3128 }
3129
3130 /*
3131 * Frame reception handling. This is called if there's a frame
3132 * on the receive return list.
3133 *
3134 * Note: we have to be able to handle two possibilities here:
3135  *	1) the frame is from the jumbo receive ring
3136 * 2) the frame is from the standard receive ring
3137 */
3138
3139 static void
3140 bge_rxeof(struct bge_softc *sc)
3141 {
3142 struct ifnet *ifp;
3143 int stdcnt = 0, jumbocnt = 0;
3144 bus_dmamap_t dmamap;
3145 bus_addr_t offset, toff;
3146 bus_size_t tlen;
3147 int tosync;
3148
3149 ifp = &sc->ethercom.ec_if;
3150
3151 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3152 offsetof(struct bge_ring_data, bge_status_block),
3153 sizeof (struct bge_status_block),
3154 BUS_DMASYNC_POSTREAD);
3155
3156 offset = offsetof(struct bge_ring_data, bge_rx_return_ring);
3157 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx -
3158 sc->bge_rx_saved_considx;
3159
3160 #if NRND > 0
3161 if (tosync != 0 && RND_ENABLED(&sc->rnd_source))
3162 rnd_add_uint32(&sc->rnd_source, tosync);
3163 #endif
3164
3165 toff = offset + (sc->bge_rx_saved_considx * sizeof (struct bge_rx_bd));
3166
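	/*
	 * If tosync is negative the producer index has wrapped around the
	 * end of the return ring; sync the tail of the ring here and let
	 * the sync below cover the wrapped portion at the start.
	 */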
3167 if (tosync < 0) {
3168 tlen = (sc->bge_return_ring_cnt - sc->bge_rx_saved_considx) *
3169 sizeof (struct bge_rx_bd);
3170 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3171 toff, tlen, BUS_DMASYNC_POSTREAD);
3172 tosync = -tosync;
3173 }
3174
3175 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3176 offset, tosync * sizeof (struct bge_rx_bd),
3177 BUS_DMASYNC_POSTREAD);
3178
3179 	while (sc->bge_rx_saved_considx !=
3180 sc->bge_rdata->bge_status_block.bge_idx[0].bge_rx_prod_idx) {
3181 struct bge_rx_bd *cur_rx;
3182 u_int32_t rxidx;
3183 struct mbuf *m = NULL;
3184
3185 cur_rx = &sc->bge_rdata->
3186 bge_rx_return_ring[sc->bge_rx_saved_considx];
3187
3188 rxidx = cur_rx->bge_idx;
3189 BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt);
3190
3191 if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) {
3192 BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
3193 m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx];
3194 sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL;
3195 jumbocnt++;
3196 bus_dmamap_sync(sc->bge_dmatag,
3197 sc->bge_cdata.bge_rx_jumbo_map,
3198 mtod(m, char *) - (char *)sc->bge_cdata.bge_jumbo_buf,
3199 BGE_JLEN, BUS_DMASYNC_POSTREAD);
3200 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3201 ifp->if_ierrors++;
3202 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3203 continue;
3204 }
3205 if (bge_newbuf_jumbo(sc, sc->bge_jumbo,
3206 NULL)== ENOBUFS) {
3207 ifp->if_ierrors++;
3208 bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
3209 continue;
3210 }
3211 } else {
3212 BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
3213 m = sc->bge_cdata.bge_rx_std_chain[rxidx];
3214
3215 sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
3216 stdcnt++;
3217 dmamap = sc->bge_cdata.bge_rx_std_map[rxidx];
3218 sc->bge_cdata.bge_rx_std_map[rxidx] = 0;
3219 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0,
3220 dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3221 bus_dmamap_unload(sc->bge_dmatag, dmamap);
3222 if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) {
3223 ifp->if_ierrors++;
3224 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3225 continue;
3226 }
3227 if (bge_newbuf_std(sc, sc->bge_std,
3228 NULL, dmamap) == ENOBUFS) {
3229 ifp->if_ierrors++;
3230 bge_newbuf_std(sc, sc->bge_std, m, dmamap);
3231 continue;
3232 }
3233 }
3234
3235 ifp->if_ipackets++;
3236 #ifndef __NO_STRICT_ALIGNMENT
3237 /*
3238 * XXX: if the 5701 PCIX-Rx-DMA workaround is in effect,
3239 * the Rx buffer has the layer-2 header unaligned.
3240 * If our CPU requires alignment, re-align by copying.
3241 */
3242 if (sc->bge_rx_alignment_bug) {
3243 memmove(mtod(m, char *) + ETHER_ALIGN, m->m_data,
3244 cur_rx->bge_len);
3245 m->m_data += ETHER_ALIGN;
3246 }
3247 #endif
3248
3249 m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
3250 m->m_pkthdr.rcvif = ifp;
3251
3252 #if NBPFILTER > 0
3253 /*
3254 * Handle BPF listeners. Let the BPF user see the packet.
3255 */
3256 if (ifp->if_bpf)
3257 bpf_mtap(ifp->if_bpf, m);
3258 #endif
3259
3260 m->m_pkthdr.csum_flags = M_CSUM_IPv4;
3261
3262 if ((cur_rx->bge_ip_csum ^ 0xffff) != 0)
3263 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
3264 /*
3265 * Rx transport checksum-offload may also
3266 * have bugs with packets which, when transmitted,
3267 * were `runts' requiring padding.
3268 */
3269 if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM &&
3270 (/* (sc->_bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||*/
3271 m->m_pkthdr.len >= ETHER_MIN_NOPAD)) {
3272 m->m_pkthdr.csum_data =
3273 cur_rx->bge_tcp_udp_csum;
3274 m->m_pkthdr.csum_flags |=
3275 (M_CSUM_TCPv4|M_CSUM_UDPv4|
3276 M_CSUM_DATA|M_CSUM_NO_PSEUDOHDR);
3277 }
3278
3279 /*
3280 * If we received a packet with a vlan tag, pass it
3281 * to vlan_input() instead of ether_input().
3282 */
3283 if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
3284 VLAN_INPUT_TAG(ifp, m, cur_rx->bge_vlan_tag, continue);
3285 }
3286
3287 (*ifp->if_input)(ifp, m);
3288 }
3289
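	/*
	 * Update the return-ring consumer index, and the producer indexes
	 * for any standard/jumbo rings we refilled above.
	 */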
3290 bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx);
3291 if (stdcnt)
3292 bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std);
3293 if (jumbocnt)
3294 bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo);
3295 }
3296
3297 static void
3298 bge_txeof(struct bge_softc *sc)
3299 {
3300 struct bge_tx_bd *cur_tx = NULL;
3301 struct ifnet *ifp;
3302 struct txdmamap_pool_entry *dma;
3303 bus_addr_t offset, toff;
3304 bus_size_t tlen;
3305 int tosync;
3306 struct mbuf *m;
3307
3308 ifp = &sc->ethercom.ec_if;
3309
3310 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3311 offsetof(struct bge_ring_data, bge_status_block),
3312 sizeof (struct bge_status_block),
3313 BUS_DMASYNC_POSTREAD);
3314
3315 offset = offsetof(struct bge_ring_data, bge_tx_ring);
3316 tosync = sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx -
3317 sc->bge_tx_saved_considx;
3318
3319 #if NRND > 0
3320 if (tosync != 0 && RND_ENABLED(&sc->rnd_source))
3321 rnd_add_uint32(&sc->rnd_source, tosync);
3322 #endif
3323
3324 toff = offset + (sc->bge_tx_saved_considx * sizeof (struct bge_tx_bd));
3325
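	/*
	 * As in bge_rxeof(), a negative tosync means the index has wrapped;
	 * sync the tail of the ring first, then the wrapped portion below.
	 */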
3326 if (tosync < 0) {
3327 tlen = (BGE_TX_RING_CNT - sc->bge_tx_saved_considx) *
3328 sizeof (struct bge_tx_bd);
3329 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3330 toff, tlen, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3331 tosync = -tosync;
3332 }
3333
3334 bus_dmamap_sync(sc->bge_dmatag, sc->bge_ring_map,
3335 offset, tosync * sizeof (struct bge_tx_bd),
3336 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3337
3338 /*
3339 * Go through our tx ring and free mbufs for those
3340 * frames that have been sent.
3341 */
3342 while (sc->bge_tx_saved_considx !=
3343 sc->bge_rdata->bge_status_block.bge_idx[0].bge_tx_cons_idx) {
3344 u_int32_t idx = 0;
3345
3346 idx = sc->bge_tx_saved_considx;
3347 cur_tx = &sc->bge_rdata->bge_tx_ring[idx];
3348 if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
3349 ifp->if_opackets++;
3350 m = sc->bge_cdata.bge_tx_chain[idx];
3351 if (m != NULL) {
3352 sc->bge_cdata.bge_tx_chain[idx] = NULL;
3353 dma = sc->txdma[idx];
3354 bus_dmamap_sync(sc->bge_dmatag, dma->dmamap, 0,
3355 dma->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3356 bus_dmamap_unload(sc->bge_dmatag, dma->dmamap);
3357 SLIST_INSERT_HEAD(&sc->txdma_list, dma, link);
3358 sc->txdma[idx] = NULL;
3359
3360 m_freem(m);
3361 }
3362 sc->bge_txcnt--;
3363 BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
3364 ifp->if_timer = 0;
3365 }
3366
3367 if (cur_tx != NULL)
3368 ifp->if_flags &= ~IFF_OACTIVE;
3369 }
3370
3371 static int
3372 bge_intr(void *xsc)
3373 {
3374 struct bge_softc *sc;
3375 struct ifnet *ifp;
3376
3377 sc = xsc;
3378 ifp = &sc->ethercom.ec_if;
3379
3380 /*
3381 * Ascertain whether the interrupt is from this bge device.
3382 * Do the cheap test first.
3383 */
3384 if ((sc->bge_rdata->bge_status_block.bge_status &
3385 BGE_STATFLAG_UPDATED) == 0) {
3386 /*
3387 * Sometimes, the interrupt comes in before the
3388 * DMA update of the status block (performed prior
3389 * to the interrupt itself) has completed.
3390 * In that case, do the (extremely expensive!)
3391 * PCI-config-space register read.
3392 */
3393 uint32_t pcistate =
3394 pci_conf_read(sc->sc_pc, sc->sc_pcitag, BGE_PCI_PCISTATE);
3395
3396 if (pcistate & BGE_PCISTATE_INTR_STATE)
3397 return (0);
3398
3399 }
3400 /*
3401 * If we reach here, then the interrupt is for us.
3402 */
3403
3404 	/* Ack interrupt and stop others from occurring. */
3405 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
3406
3407 BGE_EVCNT_INCR(sc->bge_ev_intr);
3408
3409 /*
3410 * Process link state changes.
3411 * Grrr. The link status word in the status block does
3412 * not work correctly on the BCM5700 rev AX and BX chips,
3413 * according to all available information. Hence, we have
3414 * to enable MII interrupts in order to properly obtain
3415 * async link changes. Unfortunately, this also means that
3416 * we have to read the MAC status register to detect link
3417 * changes, thereby adding an additional register access to
3418 * the interrupt handler.
3419 */
3420
3421 if (sc->bge_quirks & BGE_QUIRK_LINK_STATE_BROKEN) {
3422 u_int32_t status;
3423
3424 status = CSR_READ_4(sc, BGE_MAC_STS);
3425 if (status & BGE_MACSTAT_MI_INTERRUPT) {
3426 sc->bge_link = 0;
3427 callout_stop(&sc->bge_timeout);
3428 bge_tick(sc);
3429 /* Clear the interrupt */
3430 CSR_WRITE_4(sc, BGE_MAC_EVT_ENB,
3431 BGE_EVTENB_MI_INTERRUPT);
3432 bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR);
3433 bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR,
3434 BRGPHY_INTRS);
3435 }
3436 } else {
3437 u_int32_t status;
3438
3439 status = CSR_READ_4(sc, BGE_MAC_STS);
3440 if (status & BGE_MACSTAT_LINK_CHANGED) {
3441 sc->bge_link = 0;
3442 callout_stop(&sc->bge_timeout);
3443 bge_tick(sc);
3444 /* Clear the interrupt */
3445 CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED|
3446 BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE|
3447 BGE_MACSTAT_LINK_CHANGED);
3448 }
3449 }
3450
3451 if (ifp->if_flags & IFF_RUNNING) {
3452 /* Check RX return ring producer/consumer */
3453 bge_rxeof(sc);
3454
3455 /* Check TX ring producer/consumer */
3456 bge_txeof(sc);
3457 }
3458
3459 if (sc->bge_pending_rxintr_change) {
3460 uint32_t rx_ticks = sc->bge_rx_coal_ticks;
3461 uint32_t rx_bds = sc->bge_rx_max_coal_bds;
3462 uint32_t junk;
3463
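		/*
		 * Push the new coalescing parameters to the chip; the
		 * read-backs are presumably there to flush the posted
		 * writes before interrupts are re-enabled.
		 */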
3464 CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, rx_ticks);
3465 DELAY(10);
3466 junk = CSR_READ_4(sc, BGE_HCC_RX_COAL_TICKS);
3467
3468 CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, rx_bds);
3469 DELAY(10);
3470 junk = CSR_READ_4(sc, BGE_HCC_RX_MAX_COAL_BDS);
3471
3472 sc->bge_pending_rxintr_change = 0;
3473 }
3474 bge_handle_events(sc);
3475
3476 /* Re-enable interrupts. */
3477 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
3478
3479 if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
3480 bge_start(ifp);
3481
3482 return (1);
3483 }
3484
3485 static void
3486 bge_tick(void *xsc)
3487 {
3488 struct bge_softc *sc = xsc;
3489 struct mii_data *mii = &sc->bge_mii;
3490 int s;
3491
3492 s = splnet();
3493
3494 bge_stats_update(sc);
3495 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
3496
3497 if (sc->bge_tbi) {
3498 if (CSR_READ_4(sc, BGE_MAC_STS) &
3499 BGE_MACSTAT_TBI_PCS_SYNCHED) {
3500 CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF);
3501 }
3502 } else
3503 mii_tick(mii);
3504
3505 splx(s);
3506 }
3507
3508 static void
3509 bge_stats_update(struct bge_softc *sc)
3510 {
3511 struct ifnet *ifp = &sc->ethercom.ec_if;
3512 bus_size_t stats = BGE_MEMWIN_START + BGE_STATS_BLOCK;
3513 bus_size_t rstats = BGE_RX_STATS;
3514
3515 #define READ_RSTAT(sc, stats, stat) \
3516 CSR_READ_4(sc, stats + offsetof(struct bge_mac_stats_regs, stat))
3517
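	/*
	 * 5705-class chips do not use the statistics block; read the
	 * counters straight from the MAC statistics registers.  Other
	 * chips read the statistics block through the memory window.
	 */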
3518 if (sc->bge_quirks & BGE_QUIRK_5705_CORE) {
3519 ifp->if_collisions +=
3520 READ_RSTAT(sc, rstats, dot3StatsSingleCollisionFrames) +
3521 READ_RSTAT(sc, rstats, dot3StatsMultipleCollisionFrames) +
3522 READ_RSTAT(sc, rstats, dot3StatsExcessiveCollisions) +
3523 READ_RSTAT(sc, rstats, dot3StatsLateCollisions);
3524
3525 BGE_EVCNT_ADD(sc->bge_ev_tx_xoff,
3526 READ_RSTAT(sc, rstats, outXoffSent));
3527 BGE_EVCNT_ADD(sc->bge_ev_tx_xon,
3528 READ_RSTAT(sc, rstats, outXonSent));
3529 BGE_EVCNT_ADD(sc->bge_ev_rx_xoff,
3530 READ_RSTAT(sc, rstats, xoffPauseFramesReceived));
3531 BGE_EVCNT_ADD(sc->bge_ev_rx_xon,
3532 READ_RSTAT(sc, rstats, xonPauseFramesReceived));
3533 BGE_EVCNT_ADD(sc->bge_ev_rx_macctl,
3534 READ_RSTAT(sc, rstats, macControlFramesReceived));
3535 BGE_EVCNT_ADD(sc->bge_ev_xoffentered,
3536 READ_RSTAT(sc, rstats, xoffStateEntered));
3537 return;
3538 }
3539
3540 #undef READ_RSTAT
3541 #define READ_STAT(sc, stats, stat) \
3542 CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
3543
3544 ifp->if_collisions +=
3545 (READ_STAT(sc, stats, dot3StatsSingleCollisionFrames.bge_addr_lo) +
3546 READ_STAT(sc, stats, dot3StatsMultipleCollisionFrames.bge_addr_lo) +
3547 READ_STAT(sc, stats, dot3StatsExcessiveCollisions.bge_addr_lo) +
3548 READ_STAT(sc, stats, dot3StatsLateCollisions.bge_addr_lo)) -
3549 ifp->if_collisions;
3550
3551 BGE_EVCNT_UPD(sc->bge_ev_tx_xoff,
3552 READ_STAT(sc, stats, outXoffSent.bge_addr_lo));
3553 BGE_EVCNT_UPD(sc->bge_ev_tx_xon,
3554 READ_STAT(sc, stats, outXonSent.bge_addr_lo));
3555 BGE_EVCNT_UPD(sc->bge_ev_rx_xoff,
3556 READ_STAT(sc, stats,
3557 xoffPauseFramesReceived.bge_addr_lo));
3558 BGE_EVCNT_UPD(sc->bge_ev_rx_xon,
3559 READ_STAT(sc, stats, xonPauseFramesReceived.bge_addr_lo));
3560 BGE_EVCNT_UPD(sc->bge_ev_rx_macctl,
3561 READ_STAT(sc, stats,
3562 macControlFramesReceived.bge_addr_lo));
3563 BGE_EVCNT_UPD(sc->bge_ev_xoffentered,
3564 READ_STAT(sc, stats, xoffStateEntered.bge_addr_lo));
3565
3566 #undef READ_STAT
3567
3568 #ifdef notdef
3569 ifp->if_collisions +=
3570 (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames +
3571 sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames +
3572 sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions +
3573 sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) -
3574 ifp->if_collisions;
3575 #endif
3576 }
3577
3578 /*
3579 * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason.
3580 * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD,
3581 * but when such padded frames employ the bge IP/TCP checksum offload,
3582 * the hardware checksum assist gives incorrect results (possibly
3583 * from incorporating its own padding into the UDP/TCP checksum; who knows).
3584 * If we pad such runts with zeros, the onboard checksum comes out correct.
3585 */
3586 static inline int
3587 bge_cksum_pad(struct mbuf *pkt)
3588 {
3589 struct mbuf *last = NULL;
3590 int padlen;
3591
3592 padlen = ETHER_MIN_NOPAD - pkt->m_pkthdr.len;
3593
3594 /* if there's only the packet-header and we can pad there, use it. */
3595 if (pkt->m_pkthdr.len == pkt->m_len &&
3596 M_TRAILINGSPACE(pkt) >= padlen) {
3597 last = pkt;
3598 } else {
3599 /*
3600 * Walk packet chain to find last mbuf. We will either
3601 * pad there, or append a new mbuf and pad it
3602 * (thus perhaps avoiding the bcm5700 dma-min bug).
3603 */
3604 for (last = pkt; last->m_next != NULL; last = last->m_next) {
3605 continue; /* do nothing */
3606 }
3607
3608 /* `last' now points to last in chain. */
3609 if (M_TRAILINGSPACE(last) < padlen) {
3610 /* Allocate new empty mbuf, pad it. Compact later. */
3611 struct mbuf *n;
3612 MGET(n, M_DONTWAIT, MT_DATA);
3613 if (n == NULL)
3614 return ENOBUFS;
3615 n->m_len = 0;
3616 last->m_next = n;
3617 last = n;
3618 }
3619 }
3620
3621 KDASSERT(!M_READONLY(last));
3622 KDASSERT(M_TRAILINGSPACE(last) >= padlen);
3623
3624 /* Now zero the pad area, to avoid the bge cksum-assist bug */
3625 memset(mtod(last, char *) + last->m_len, 0, padlen);
3626 last->m_len += padlen;
3627 pkt->m_pkthdr.len += padlen;
3628 return 0;
3629 }
3630
3631 /*
3632 * Compact outbound packets to avoid bug with DMA segments less than 8 bytes.
3633 */
3634 static inline int
3635 bge_compact_dma_runt(struct mbuf *pkt)
3636 {
3637 struct mbuf *m, *prev;
3638 int totlen, prevlen;
3639
3640 prev = NULL;
3641 totlen = 0;
3642 prevlen = -1;
3643
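	/*
	 * Walk the chain looking for mbufs shorter than 8 bytes.  A runt
	 * is fixed up by copying it into trailing space in the previous
	 * mbuf, by pulling data up from the next mbuf, or, failing that,
	 * by allocating a new mbuf that combines the runt with data
	 * borrowed from the tail of the previous mbuf.
	 */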
3644 	for (m = pkt; m != NULL; prev = m, m = m->m_next) {
3645 		int mlen = m->m_len;
3646 		int shortfall = 8 - mlen;
3647
3648 totlen += mlen;
3649 if (mlen == 0) {
3650 continue;
3651 }
3652 if (mlen >= 8)
3653 continue;
3654
3655 		/* If we get here, the mbuf data is too small for the DMA engine.
3656 		 * Try to fix that by shuffling data into the previous or next
3657 		 * mbuf in the chain; failing that, merge the runt into a newly
3658 		 * allocated mbuf (together with bytes borrowed from prev). */
3659
3660 		/* Internal fragment: if it fits in prev, copy it there. */
3661 if (prev && M_TRAILINGSPACE(prev) >= m->m_len) {
3662 memcpy(prev->m_data + prev->m_len, m->m_data, mlen);
3663 prev->m_len += mlen;
3664 m->m_len = 0;
3665 /* XXX stitch chain */
3666 prev->m_next = m_free(m);
3667 m = prev;
3668 continue;
3669 }
3670 else if (m->m_next != NULL &&
3671 M_TRAILINGSPACE(m) >= shortfall &&
3672 m->m_next->m_len >= (8 + shortfall)) {
3673 			/* m is writable and m_next has enough data; pull some up. */
3674
3675 memcpy(m->m_data + m->m_len, m->m_next->m_data,
3676 shortfall);
3677 m->m_len += shortfall;
3678 m->m_next->m_len -= shortfall;
3679 m->m_next->m_data += shortfall;
3680 }
3681 else if (m->m_next == NULL || 1) {
3682 			/* Runt we could not fix up in place (the "|| 1" above forces
3683 			 * this safe path for every remaining case, not only a runt at
3684 			 * the very end of the packet).  Borrow data from the tail of
3685 			 * the preceding mbuf and shorten it in place.  (Its data stays
3686 			 * valid, so we can do this even if prev is not writable.) */
3687
3688 			/* If stealing would make prev a runt, take all of its data. */
3689 KASSERT(prev != NULL /*, ("runt but null PREV")*/);
3690 KASSERT(prev->m_len >= 8 /*, ("runt prev")*/);
3691
3692 if ((prev->m_len - shortfall) < 8)
3693 shortfall = prev->m_len;
3694
3695 #ifdef notyet /* just do the safe slow thing for now */
3696 if (!M_READONLY(m)) {
3697 				if (M_LEADINGSPACE(m) < shortfall) {
3698 					void *m_dat;
3699 					m_dat = (m->m_flags & M_PKTHDR) ?
3700 					  m->m_pktdat : m->m_dat;
3701 memmove(m_dat, mtod(m, void*), m->m_len);
3702 m->m_data = m_dat;
3703 }
3704 } else
3705 #endif /* just do the safe slow thing */
3706 {
3707 				struct mbuf *n = NULL;
3708 				int newprevlen = prev->m_len - shortfall;
3709 
3710 				MGET(n, M_DONTWAIT, MT_DATA);
3711 if (n == NULL)
3712 return ENOBUFS;
3713 KASSERT(m->m_len + shortfall < MLEN
3714 /*,
3715 ("runt %d +prev %d too big\n", m->m_len, shortfall)*/);
3716
3717 /* first copy the data we're stealing from prev */
3718 memcpy(n->m_data, prev->m_data + newprevlen,
3719 shortfall);
3720
3721 /* update prev->m_len accordingly */
3722 prev->m_len -= shortfall;
3723
3724 /* copy data from runt m */
3725 memcpy(n->m_data + shortfall, m->m_data,
3726 m->m_len);
3727
3728 /* n holds what we stole from prev, plus m */
3729 n->m_len = shortfall + m->m_len;
3730
3731 /* stitch n into chain and free m */
3732 n->m_next = m->m_next;
3733 prev->m_next = n;
3734 /* KASSERT(m->m_next == NULL); */
3735 m->m_next = NULL;
3736 m_free(m);
3737 m = n; /* for continuing loop */
3738 }
3739 }
3740 prevlen = m->m_len;
3741 }
3742 return 0;
3743 }
3744
3745 /*
3746 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
3747 * pointers to descriptors.
3748 */
3749 static int
3750 bge_encap(struct bge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
3751 {
3752 struct bge_tx_bd *f = NULL;
3753 u_int32_t frag, cur;
3754 u_int16_t csum_flags = 0;
3755 u_int16_t txbd_tso_flags = 0;
3756 struct txdmamap_pool_entry *dma;
3757 bus_dmamap_t dmamap;
3758 int i = 0;
3759 struct m_tag *mtag;
3760 int use_tso, maxsegsize, error;
3761
3762 cur = frag = *txidx;
3763
3764 if (m_head->m_pkthdr.csum_flags) {
3765 if (m_head->m_pkthdr.csum_flags & M_CSUM_IPv4)
3766 csum_flags |= BGE_TXBDFLAG_IP_CSUM;
3767 if (m_head->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4))
3768 csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM;
3769 }
3770
3771 /*
3772 * If we were asked to do an outboard checksum, and the NIC
3773 * has the bug where it sometimes adds in the Ethernet padding,
3774 * explicitly pad with zeros so the cksum will be correct either way.
3775 	 * (For now, do this for all chip versions, until newer chips
3776 	 * are confirmed not to require the workaround.)
3777 */
3778 if ((csum_flags & BGE_TXBDFLAG_TCP_UDP_CSUM) == 0 ||
3779 #ifdef notyet
3780 (sc->bge_quirks & BGE_QUIRK_SHORT_CKSUM_BUG) == 0 ||
3781 #endif
3782 m_head->m_pkthdr.len >= ETHER_MIN_NOPAD)
3783 goto check_dma_bug;
3784
3785 if (bge_cksum_pad(m_head) != 0) {
3786 return ENOBUFS;
3787 }
3788
3789 check_dma_bug:
3790 if (!(sc->bge_quirks & BGE_QUIRK_5700_SMALLDMA))
3791 goto doit;
3792 /*
3793 * bcm5700 Revision B silicon cannot handle DMA descriptors with
3794 * less than eight bytes. If we encounter a teeny mbuf
3795 * at the end of a chain, we can pad. Otherwise, copy.
3796 */
3797 if (bge_compact_dma_runt(m_head) != 0)
3798 return ENOBUFS;
3799
3800 doit:
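	/* Take a preallocated DMA map from the per-device free list;
	 * if none are left we cannot queue this packet right now. */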
3801 dma = SLIST_FIRST(&sc->txdma_list);
3802 if (dma == NULL)
3803 return ENOBUFS;
3804 dmamap = dma->dmamap;
3805
3806 /*
3807 * Set up any necessary TSO state before we start packing...
3808 */
3809 use_tso = (m_head->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
3810 if (!use_tso) {
3811 maxsegsize = 0;
3812 } else { /* TSO setup */
3813 unsigned mss;
3814 struct ether_header *eh;
3815 unsigned ip_tcp_hlen, iptcp_opt_words, tcp_seg_flags, offset;
3816 struct mbuf * m0 = m_head;
3817 struct ip *ip;
3818 struct tcphdr *th;
3819 int iphl, hlen;
3820
3821 /*
3822 * XXX It would be nice if the mbuf pkthdr had offset
3823 * fields for the protocol headers.
3824 */
3825
3826 eh = mtod(m0, struct ether_header *);
3827 		switch (ntohs(eh->ether_type)) {
3828 case ETHERTYPE_IP:
3829 offset = ETHER_HDR_LEN;
3830 break;
3831
3832 case ETHERTYPE_VLAN:
3833 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3834 break;
3835
3836 default:
3837 /*
3838 * Don't support this protocol or encapsulation.
3839 */
3840 return (ENOBUFS);
3841 }
3842
3843 /*
3844 * TCP/IP headers are in the first mbuf; we can do
3845 * this the easy way.
3846 */
3847 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
3848 hlen = iphl + offset;
3849 if (__predict_false(m0->m_len <
3850 (hlen + sizeof(struct tcphdr)))) {
3851
3852 			aprint_debug_dev(sc->bge_dev,
3853 			    "TSO: hard case m0->m_len == %d < ip/tcp hlen %zu, "
3854 			    "not handled yet\n",
3855 			    m0->m_len, hlen + sizeof(struct tcphdr));
3856 #ifdef NOTYET
3857 /*
3858 * XXX jonathan (at) NetBSD.org: untested.
3859 * how to force this branch to be taken?
3860 */
3861 BGE_EVCNT_INCR(&sc->sc_ev_txtsopain);
3862
3863 m_copydata(m0, offset, sizeof(ip), &ip);
3864 m_copydata(m0, hlen, sizeof(th), &th);
3865
3866 ip.ip_len = 0;
3867
3868 m_copyback(m0, hlen + offsetof(struct ip, ip_len),
3869 sizeof(ip.ip_len), &ip.ip_len);
3870
3871 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
3872 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
3873
3874 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
3875 sizeof(th.th_sum), &th.th_sum);
3876
3877 hlen += th.th_off << 2;
3878 iptcp_opt_words = hlen;
3879 #else
3880 /*
3881 * if_wm "hard" case not yet supported, can we not
3882 * mandate it out of existence?
3883 */
3884 			(void)ip; (void)th; (void)ip_tcp_hlen;
3885
3886 return ENOBUFS;
3887 #endif
3888 } else {
3889 ip = (struct ip *) (mtod(m0, char *) + offset);
3890 th = (struct tcphdr *) (mtod(m0, char *) + hlen);
3891 ip_tcp_hlen = iphl + (th->th_off << 2);
3892
3893 /* Total IP/TCP options, in 32-bit words */
3894 iptcp_opt_words = (ip_tcp_hlen
3895 - sizeof(struct tcphdr)
3896 - sizeof(struct ip)) >> 2;
3897 }
3898 if (BGE_IS_5750_OR_BEYOND(sc)) {
3899 th->th_sum = 0;
3900 csum_flags &= ~(BGE_TXBDFLAG_TCP_UDP_CSUM);
3901 } else {
3902 /*
3903 * XXX jonathan (at) NetBSD.org: 5705 untested.
3904 * Requires TSO firmware patch for 5701/5703/5704.
3905 */
3906 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
3907 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
3908 }
3909
3910 mss = m_head->m_pkthdr.segsz;
3911 txbd_tso_flags |=
3912 BGE_TXBDFLAG_CPU_PRE_DMA |
3913 BGE_TXBDFLAG_CPU_POST_DMA;
3914
3915 /*
3916 * Our NIC TSO-assist assumes TSO has standard, optionless
3917 * IPv4 and TCP headers, which total 40 bytes. By default,
3918 * the NIC copies 40 bytes of IP/TCP header from the
3919 * supplied header into the IP/TCP header portion of
3920 * each post-TSO-segment. If the supplied packet has IP or
3921 * TCP options, we need to tell the NIC to copy those extra
3922 * bytes into each post-TSO header, in addition to the normal
3923 * 40-byte IP/TCP header (and to leave space accordingly).
3924 		 * Unfortunately, the descriptor encoding of the option length
3925 		 * varies across ASIC families.
3926 */
3927 tcp_seg_flags = 0;
3928 if (iptcp_opt_words) {
3929 			if (BGE_IS_5705_OR_BEYOND(sc)) {
3930 tcp_seg_flags =
3931 iptcp_opt_words << 11;
3932 } else {
3933 txbd_tso_flags |=
3934 iptcp_opt_words << 12;
3935 }
3936 }
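		/*
		 * The MSS occupies the low bits of the descriptor field used
		 * for the TSO segment size (bge_rsvd); on chips that use the
		 * alternate encoding, the option-word count is OR'd into its
		 * upper bits.  ip_len is pre-set to the per-segment length,
		 * so the header the NIC replicates into each full-sized
		 * segment already carries the right total length.
		 */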
3937 maxsegsize = mss | tcp_seg_flags;
3938 ip->ip_len = htons(mss + ip_tcp_hlen);
3939
3940 } /* TSO setup */
3941
3942 /*
3943 * Start packing the mbufs in this chain into
3944 * the fragment pointers. Stop when we run out
3945 * of fragments or hit the end of the mbuf chain.
3946 */
3947 error = bus_dmamap_load_mbuf(sc->bge_dmatag, dmamap, m_head,
3948 BUS_DMA_NOWAIT);
3949 if (error) {
3950 return(ENOBUFS);
3951 }
3952 /*
3953 * Sanity check: avoid coming within 16 descriptors
3954 * of the end of the ring.
3955 */
3956 if (dmamap->dm_nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
3957 BGE_TSO_PRINTF(("%s: "
3958 " dmamap_load_mbuf too close to ring wrap\n",
3959 device_xname(sc->bge_dev)));
3960 goto fail_unload;
3961 }
3962
3963 mtag = sc->ethercom.ec_nvlans ?
3964 m_tag_find(m_head, PACKET_TAG_VLAN, NULL) : NULL;
3965
3966
3967 	/* Iterate over the DMA-map fragments. */
3968 for (i = 0; i < dmamap->dm_nsegs; i++) {
3969 f = &sc->bge_rdata->bge_tx_ring[frag];
3970 if (sc->bge_cdata.bge_tx_chain[frag] != NULL)
3971 break;
3972
3973 bge_set_hostaddr(&f->bge_addr, dmamap->dm_segs[i].ds_addr);
3974 f->bge_len = dmamap->dm_segs[i].ds_len;
3975
3976 /*
3977 * For 5751 and follow-ons, for TSO we must turn
3978 * off checksum-assist flag in the tx-descr, and
3979 * supply the ASIC-revision-specific encoding
3980 * of TSO flags and segsize.
3981 */
3982 if (use_tso) {
3983 if (BGE_IS_5750_OR_BEYOND(sc) || i == 0) {
3984 f->bge_rsvd = maxsegsize;
3985 f->bge_flags = csum_flags | txbd_tso_flags;
3986 } else {
3987 f->bge_rsvd = 0;
3988 f->bge_flags =
3989 (csum_flags | txbd_tso_flags) & 0x0fff;
3990 }
3991 } else {
3992 f->bge_rsvd = 0;
3993 f->bge_flags = csum_flags;
3994 }
3995
3996 if (mtag != NULL) {
3997 f->bge_flags |= BGE_TXBDFLAG_VLAN_TAG;
3998 f->bge_vlan_tag = VLAN_TAG_VALUE(mtag);
3999 } else {
4000 f->bge_vlan_tag = 0;
4001 }
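		/* Remember the last descriptor filled (cur) and advance the
		 * producer index, wrapping at the end of the ring. */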
4002 cur = frag;
4003 BGE_INC(frag, BGE_TX_RING_CNT);
4004 }
4005
4006 if (i < dmamap->dm_nsegs) {
4007 BGE_TSO_PRINTF(("%s: reached %d < dm_nsegs %d\n",
4008 device_xname(sc->bge_dev), i, dmamap->dm_nsegs));
4009 goto fail_unload;
4010 }
4011
4012 bus_dmamap_sync(sc->bge_dmatag, dmamap, 0, dmamap->dm_mapsize,
4013 BUS_DMASYNC_PREWRITE);
4014
4015 if (frag == sc->bge_tx_saved_considx) {
4016 BGE_TSO_PRINTF(("%s: frag %d = wrapped id %d?\n",
4017 device_xname(sc->bge_dev), frag, sc->bge_tx_saved_considx));
4018
4019 goto fail_unload;
4020 }
4021
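	/* Flag the final descriptor, record the mbuf and DMA map against
	 * that slot for later reclamation by bge_txeof(), and account for
	 * the descriptors just consumed. */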
4022 sc->bge_rdata->bge_tx_ring[cur].bge_flags |= BGE_TXBDFLAG_END;
4023 sc->bge_cdata.bge_tx_chain[cur] = m_head;
4024 SLIST_REMOVE_HEAD(&sc->txdma_list, link);
4025 sc->txdma[cur] = dma;
4026 sc->bge_txcnt += dmamap->dm_nsegs;
4027
4028 *txidx = frag;
4029
4030 return(0);
4031
4032 fail_unload:
4033 bus_dmamap_unload(sc->bge_dmatag, dmamap);
4034
4035 return ENOBUFS;
4036 }
4037
4038 /*
4039 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
4040 * to the mbuf data regions directly in the transmit descriptors.
4041 */
4042 static void
4043 bge_start(struct ifnet *ifp)
4044 {
4045 struct bge_softc *sc;
4046 struct mbuf *m_head = NULL;
4047 u_int32_t prodidx;
4048 int pkts = 0;
4049
4050 sc = ifp->if_softc;
4051
4052 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
4053 return;
4054
4055 prodidx = sc->bge_tx_prodidx;
4056
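	/*
	 * Queue packets as long as the descriptor slot at prodidx is
	 * free; a non-NULL bge_tx_chain entry means the ring is full.
	 */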
4057 	while (sc->bge_cdata.bge_tx_chain[prodidx] == NULL) {
4058 IFQ_POLL(&ifp->if_snd, m_head);
4059 if (m_head == NULL)
4060 break;
4061
4062 #if 0
4063 /*
4064 * XXX
4065 * safety overkill. If this is a fragmented packet chain
4066 * with delayed TCP/UDP checksums, then only encapsulate
4067 * it if we have enough descriptors to handle the entire
4068 * chain at once.
4069 * (paranoia -- may not actually be needed)
4070 */
4071 if (m_head->m_flags & M_FIRSTFRAG &&
4072 m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
4073 if ((BGE_TX_RING_CNT - sc->bge_txcnt) <
4074 M_CSUM_DATA_IPv4_OFFSET(m_head->m_pkthdr.csum_data) + 16) {
4075 ifp->if_flags |= IFF_OACTIVE;
4076 break;
4077 }
4078 }
4079 #endif
4080
4081 /*
4082 * Pack the data into the transmit ring. If we
4083 * don't have room, set the OACTIVE flag and wait
4084 * for the NIC to drain the ring.
4085 */
4086 if (bge_encap(sc, m_head, &prodidx)) {
4087 ifp->if_flags |= IFF_OACTIVE;
4088 break;
4089 }
4090
4091 		/* Now we are committed to transmitting the packet. */
4092 IFQ_DEQUEUE(&ifp->if_snd, m_head);
4093 pkts++;
4094
4095 #if NBPFILTER > 0
4096 /*
4097 * If there's a BPF listener, bounce a copy of this frame
4098 * to him.
4099 */
4100 if (ifp->if_bpf)
4101 bpf_mtap(ifp->if_bpf, m_head);
4102 #endif
4103 }
4104 if (pkts == 0)
4105 return;
4106
4107 /* Transmit */
4108 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4109 if (sc->bge_quirks & BGE_QUIRK_PRODUCER_BUG) /* 5700 b2 errata */
4110 bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx);
4111
4112 sc->bge_tx_prodidx = prodidx;
4113
4114 /*
4115 * Set a timeout in case the chip goes out to lunch.
4116 */
4117 ifp->if_timer = 5;
4118 }
4119
4120 static int
4121 bge_init(struct ifnet *ifp)
4122 {
4123 struct bge_softc *sc = ifp->if_softc;
4124 const u_int16_t *m;
4125 int s, error = 0;
4126
4127 s = splnet();
4128
4129 ifp = &sc->ethercom.ec_if;
4130
4131 /* Cancel pending I/O and flush buffers. */
4132 bge_stop(ifp, 0);
4133 bge_reset(sc);
4134 bge_chipinit(sc);
4135
4136 /*
4137 * Init the various state machines, ring
4138 * control blocks and firmware.
4139 */
4140 error = bge_blockinit(sc);
4141 if (error != 0) {
4142 aprint_error_dev(sc->bge_dev, "initialization error %d\n",
4143 error);
4144 splx(s);
4145 return error;
4146 }
4147
4150 /* Specify MTU. */
4151 CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
4152 ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
4153
4154 /* Load our MAC address. */
4155 m = (const u_int16_t *)&(CLLADDR(ifp->if_sadl)[0]);
4156 CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0]));
4157 CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2]));
4158
4159 /* Enable or disable promiscuous mode as needed. */
4160 if (ifp->if_flags & IFF_PROMISC) {
4161 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4162 } else {
4163 BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
4164 }
4165
4166 /* Program multicast filter. */
4167 bge_setmulti(sc);
4168
4169 /* Init RX ring. */
4170 bge_init_rx_ring_std(sc);
4171
4172 /* Init jumbo RX ring. */
4173 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
4174 bge_init_rx_ring_jumbo(sc);
4175
4176 /* Init our RX return ring index */
4177 sc->bge_rx_saved_considx = 0;
4178
4179 /* Init TX ring. */
4180 bge_init_tx_ring(sc);
4181
4182 /* Turn on transmitter */
4183 BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE);
4184
4185 /* Turn on receiver */
4186 BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4187
4188 CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2);
4189
4190 /* Tell firmware we're alive. */
4191 BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4192
4193 /* Enable host interrupts. */
4194 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA);
4195 BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4196 bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
4197
4198 if ((error = bge_ifmedia_upd(ifp)) != 0)
4199 goto out;
4200
4201 ifp->if_flags |= IFF_RUNNING;
4202 ifp->if_flags &= ~IFF_OACTIVE;
4203
4204 callout_reset(&sc->bge_timeout, hz, bge_tick, sc);
4205
4206 out:
4207 splx(s);
4208
4209 return error;
4210 }
4211
4212 /*
4213 * Set media options.
4214 */
4215 static int
4216 bge_ifmedia_upd(struct ifnet *ifp)
4217 {
4218 struct bge_softc *sc = ifp->if_softc;
4219 struct mii_data *mii = &sc->bge_mii;
4220 struct ifmedia *ifm = &sc->bge_ifmedia;
4221 int rc;
4222
4223 /* If this is a 1000baseX NIC, enable the TBI port. */
4224 if (sc->bge_tbi) {
4225 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
4226 return(EINVAL);
4227 		switch (IFM_SUBTYPE(ifm->ifm_media)) {
4228 case IFM_AUTO:
4229 break;
4230 case IFM_1000_SX:
4231 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
4232 BGE_CLRBIT(sc, BGE_MAC_MODE,
4233 BGE_MACMODE_HALF_DUPLEX);
4234 } else {
4235 BGE_SETBIT(sc, BGE_MAC_MODE,
4236 BGE_MACMODE_HALF_DUPLEX);
4237 }
4238 break;
4239 default:
4240 return(EINVAL);
4241 }
4242 /* XXX 802.3x flow control for 1000BASE-SX */
4243 return(0);
4244 }
4245
4246 sc->bge_link = 0;
4247 if ((rc = mii_mediachg(mii)) == ENXIO)
4248 return 0;
4249 return rc;
4250 }
4251
4252 /*
4253 * Report current media status.
4254 */
4255 static void
4256 bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4257 {
4258 struct bge_softc *sc = ifp->if_softc;
4259 struct mii_data *mii = &sc->bge_mii;
4260
4261 if (sc->bge_tbi) {
4262 ifmr->ifm_status = IFM_AVALID;
4263 ifmr->ifm_active = IFM_ETHER;
4264 if (CSR_READ_4(sc, BGE_MAC_STS) &
4265 BGE_MACSTAT_TBI_PCS_SYNCHED)
4266 ifmr->ifm_status |= IFM_ACTIVE;
4267 ifmr->ifm_active |= IFM_1000_SX;
4268 if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX)
4269 ifmr->ifm_active |= IFM_HDX;
4270 else
4271 ifmr->ifm_active |= IFM_FDX;
4272 return;
4273 }
4274
4275 mii_pollstat(mii);
4276 ifmr->ifm_status = mii->mii_media_status;
4277 ifmr->ifm_active = (mii->mii_media_active & ~IFM_ETH_FMASK) |
4278 sc->bge_flowflags;
4279 }
4280
4281 static int
4282 bge_ioctl(struct ifnet *ifp, u_long command, void *data)
4283 {
4284 struct bge_softc *sc = ifp->if_softc;
4285 struct ifreq *ifr = (struct ifreq *) data;
4286 int s, error = 0;
4287 struct mii_data *mii;
4288
4289 s = splnet();
4290
4291 	switch (command) {
4292 case SIOCSIFFLAGS:
4293 if (ifp->if_flags & IFF_UP) {
4294 /*
4295 * If only the state of the PROMISC flag changed,
4296 * then just use the 'set promisc mode' command
4297 * instead of reinitializing the entire NIC. Doing
4298 * a full re-init means reloading the firmware and
4299 * waiting for it to start up, which may take a
4300 * second or two.
4301 */
4302 if (ifp->if_flags & IFF_RUNNING &&
4303 ifp->if_flags & IFF_PROMISC &&
4304 !(sc->bge_if_flags & IFF_PROMISC)) {
4305 BGE_SETBIT(sc, BGE_RX_MODE,
4306 BGE_RXMODE_RX_PROMISC);
4307 } else if (ifp->if_flags & IFF_RUNNING &&
4308 !(ifp->if_flags & IFF_PROMISC) &&
4309 sc->bge_if_flags & IFF_PROMISC) {
4310 BGE_CLRBIT(sc, BGE_RX_MODE,
4311 BGE_RXMODE_RX_PROMISC);
4312 } else if (!(sc->bge_if_flags & IFF_UP))
4313 bge_init(ifp);
4314 } else {
4315 if (ifp->if_flags & IFF_RUNNING)
4316 bge_stop(ifp, 1);
4317 }
4318 sc->bge_if_flags = ifp->if_flags;
4319 error = 0;
4320 break;
4321 case SIOCADDMULTI:
4322 case SIOCDELMULTI:
4323 if (ifp->if_flags & IFF_RUNNING) {
4324 bge_setmulti(sc);
4325 error = 0;
4326 }
4327 break;
4328 case SIOCSIFMEDIA:
4329 /* XXX Flow control is not supported for 1000BASE-SX */
4330 if (sc->bge_tbi) {
4331 ifr->ifr_media &= ~IFM_ETH_FMASK;
4332 sc->bge_flowflags = 0;
4333 }
4334
4335 /* Flow control requires full-duplex mode. */
4336 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
4337 (ifr->ifr_media & IFM_FDX) == 0) {
4338 ifr->ifr_media &= ~IFM_ETH_FMASK;
4339 }
4340 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
4341 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
4342 				/* We can do both TXPAUSE and RXPAUSE. */
4343 ifr->ifr_media |=
4344 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
4345 }
4346 sc->bge_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
4347 }
4348 /* FALLTHROUGH */
4349 case SIOCGIFMEDIA:
4350 if (sc->bge_tbi) {
4351 error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia,
4352 command);
4353 } else {
4354 mii = &sc->bge_mii;
4355 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
4356 command);
4357 }
4358 break;
4359 default:
4360 error = ether_ioctl(ifp, command, data);
4361 if (error == ENETRESET) {
4362 error = 0;
4363 }
4364 break;
4365 }
4366
4367 splx(s);
4368
4369 return(error);
4370 }
4371
4372 static void
4373 bge_watchdog(struct ifnet *ifp)
4374 {
4375 struct bge_softc *sc;
4376
4377 sc = ifp->if_softc;
4378
4379 aprint_error_dev(sc->bge_dev, "watchdog timeout -- resetting\n");
4380
4381 ifp->if_flags &= ~IFF_RUNNING;
4382 bge_init(ifp);
4383
4384 ifp->if_oerrors++;
4385 }
4386
4387 static void
4388 bge_stop_block(struct bge_softc *sc, bus_addr_t reg, uint32_t bit)
4389 {
4390 int i;
4391
4392 BGE_CLRBIT(sc, reg, bit);
4393
4394 for (i = 0; i < BGE_TIMEOUT; i++) {
4395 if ((CSR_READ_4(sc, reg) & bit) == 0)
4396 return;
4397 delay(100);
4398 if (sc->bge_pcie)
4399 DELAY(1000);
4400 }
4401
4402 aprint_error_dev(sc->bge_dev,
4403 "block failed to stop: reg 0x%lx, bit 0x%08x\n", (u_long)reg, bit);
4404 }
4405
4406 /*
4407 * Stop the adapter and free any mbufs allocated to the
4408 * RX and TX lists.
4409 */
4410 static void
4411 bge_stop(struct ifnet *ifp, int disable)
4412 {
4413 struct bge_softc *sc = ifp->if_softc;
4414
4415 callout_stop(&sc->bge_timeout);
4416
4417 /*
4418 * Disable all of the receiver blocks
4419 */
4420 bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE);
4421 bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE);
4422 bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE);
4423 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4424 bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE);
4425 }
4426 bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE);
4427 bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE);
4428 bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE);
4429
4430 /*
4431 * Disable all of the transmit blocks
4432 */
4433 bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE);
4434 bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE);
4435 bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE);
4436 bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE);
4437 bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE);
4438 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4439 bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE);
4440 }
4441 bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE);
4442
4443 /*
4444 * Shut down all of the memory managers and related
4445 * state machines.
4446 */
4447 bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
4448 bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE);
4449 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4450 bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE);
4451 }
4452
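	/*
	 * Pulse the FTQ (flow-through queue) reset register: assert all
	 * of the reset bits, then release them.
	 */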
4453 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF);
4454 CSR_WRITE_4(sc, BGE_FTQ_RESET, 0);
4455
4456 if ((sc->bge_quirks & BGE_QUIRK_5705_CORE) == 0) {
4457 bge_stop_block(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE);
4458 bge_stop_block(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);
4459 }
4460
4461 /* Disable host interrupts. */
4462 BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR);
4463 bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
4464
4465 /*
4466 * Tell firmware we're shutting down.
4467 */
4468 BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP);
4469
4470 /* Free the RX lists. */
4471 bge_free_rx_ring_std(sc);
4472
4473 /* Free jumbo RX list. */
4474 bge_free_rx_ring_jumbo(sc);
4475
4476 /* Free TX buffers. */
4477 bge_free_tx_ring(sc);
4478
4479 /*
4480 * Isolate/power down the PHY.
4481 */
4482 if (!sc->bge_tbi)
4483 mii_down(&sc->bge_mii);
4484
4485 sc->bge_link = 0;
4486
4487 sc->bge_tx_saved_considx = BGE_TXCONS_UNSET;
4488
4489 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4490 }
4491
4492 static int
4493 sysctl_bge_verify(SYSCTLFN_ARGS)
4494 {
4495 int error, t;
4496 struct sysctlnode node;
4497
4498 node = *rnode;
4499 t = *(int*)rnode->sysctl_data;
4500 node.sysctl_data = &t;
4501 error = sysctl_lookup(SYSCTLFN_CALL(&node));
4502 if (error || newp == NULL)
4503 return (error);
4504
4505 #if 0
4506 DPRINTF2(("%s: t = %d, nodenum = %d, rnodenum = %d\n", __func__, t,
4507 node.sysctl_num, rnode->sysctl_num));
4508 #endif
4509
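	/* Only the rx_lvl node is handled here: validate the requested
	 * interrupt-mitigation level and push it out to all bge devices. */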
4510 if (node.sysctl_num == bge_rxthresh_nodenum) {
4511 if (t < 0 || t >= NBGE_RX_THRESH)
4512 return (EINVAL);
4513 bge_update_all_threshes(t);
4514 } else
4515 return (EINVAL);
4516
4517 *(int*)rnode->sysctl_data = t;
4518
4519 return (0);
4520 }
4521
4522 /*
4523 * Set up sysctl(3) MIB, hw.bge.*.
4524 *
4525  * TBD: condition CTLFLAG_PERMANENT on whether we are an LKM or not
4526 */
4527 SYSCTL_SETUP(sysctl_bge, "sysctl bge subtree setup")
4528 {
4529 int rc, bge_root_num;
4530 const struct sysctlnode *node;
4531
4532 if ((rc = sysctl_createv(clog, 0, NULL, NULL,
4533 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
4534 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
4535 goto err;
4536 }
4537
4538 if ((rc = sysctl_createv(clog, 0, NULL, &node,
4539 CTLFLAG_PERMANENT, CTLTYPE_NODE, "bge",
4540 SYSCTL_DESCR("BGE interface controls"),
4541 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
4542 goto err;
4543 }
4544
4545 bge_root_num = node->sysctl_num;
4546
4547 /* BGE Rx interrupt mitigation level */
4548 if ((rc = sysctl_createv(clog, 0, NULL, &node,
4549 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4550 CTLTYPE_INT, "rx_lvl",
4551 SYSCTL_DESCR("BGE receive interrupt mitigation level"),
4552 sysctl_bge_verify, 0,
4553 &bge_rx_thresh_lvl,
4554 0, CTL_HW, bge_root_num, CTL_CREATE,
4555 CTL_EOL)) != 0) {
4556 goto err;
4557 }
4558
4559 bge_rxthresh_nodenum = node->sysctl_num;
4560
4561 return;
4562
4563 err:
4564 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
4565 }
4566
4567 static int
4568 bge_get_eaddr_mem(struct bge_softc *sc, u_int8_t ether_addr[])
4569 {
4570 u_int32_t mac_addr;
4571
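	/*
	 * The bootcode apparently leaves the station address in NIC memory
	 * behind a 0x484b (ASCII "HK") signature in the upper 16 bits of
	 * the first word; only trust the bytes if the signature is present.
	 */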
4572 mac_addr = bge_readmem_ind(sc, 0x0c14);
4573 if ((mac_addr >> 16) == 0x484b) {
4574 ether_addr[0] = (uint8_t)(mac_addr >> 8);
4575 ether_addr[1] = (uint8_t)mac_addr;
4576 mac_addr = bge_readmem_ind(sc, 0x0c18);
4577 ether_addr[2] = (uint8_t)(mac_addr >> 24);
4578 ether_addr[3] = (uint8_t)(mac_addr >> 16);
4579 ether_addr[4] = (uint8_t)(mac_addr >> 8);
4580 ether_addr[5] = (uint8_t)mac_addr;
4581 return (0);
4582 }
4583 return (1);
4584 }
4585
4586 static int
4587 bge_get_eaddr_nvram(struct bge_softc *sc, u_int8_t ether_addr[])
4588 {
4589 int mac_offset = BGE_EE_MAC_OFFSET;
4590
4591 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4592 mac_offset = BGE_EE_MAC_OFFSET_5906;
4593 }
4594
4595 return (bge_read_nvram(sc, ether_addr, mac_offset + 2,
4596 ETHER_ADDR_LEN));
4597 }
4598
4599 static int
4600 bge_get_eaddr_eeprom(struct bge_softc *sc, u_int8_t ether_addr[])
4601 {
4602
4603 if (BGE_ASICREV(sc->bge_chipid) == BGE_ASICREV_BCM5906) {
4604 return (1);
4605 }
4606
4607 return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2,
4608 ETHER_ADDR_LEN));
4609 }
4610
4611 static int
4612 bge_get_eaddr(struct bge_softc *sc, u_int8_t eaddr[])
4613 {
4614 static const bge_eaddr_fcn_t bge_eaddr_funcs[] = {
4615 /* NOTE: Order is critical */
4616 bge_get_eaddr_mem,
4617 bge_get_eaddr_nvram,
4618 bge_get_eaddr_eeprom,
4619 NULL
4620 };
4621 const bge_eaddr_fcn_t *func;
4622
4623 for (func = bge_eaddr_funcs; *func != NULL; ++func) {
4624 if ((*func)(sc, eaddr) == 0)
4625 break;
4626 }
4627 return (*func == NULL ? ENXIO : 0);
4628 }
4629